import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub standing in for PIL.Image when vision dependencies are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Set up the optimizer and the learning rate scheduler."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
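    # For example, `--lr_scheduler cosine` with `--warmup_steps 500` resolves to
    # get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=500,
    # num_training_steps=num_training_steps).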
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
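    # Example: padding a (2, 5) batch of token ids to max_length=8 yields a
    # (2, 8) tensor whose last three columns are filled with pad_token_id.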
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ : Union[str, Any] = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
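# A minimal usage sketch (names as defined above; purely illustrative):
#
#     config = ViTConfig()                # ViT-Base defaults: hidden_size=768, 12 layers
#     onnx_config = ViTOnnxConfig(config)
#     onnx_config.inputs                  # OrderedDict describing the "pixel_values" input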
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
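# Note: the decoder mask built above always attends to the first decoder position
# and masks padding only in the remaining (shifted) target positions.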
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
        ]

        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' , truncation=UpperCamelCase_ , max_length=5_1_2 , padding=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model.generate(**UpperCamelCase_ , num_beams=2 ).sequences
lowerCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
assert tgt_text == decoded
"""simple docstring"""
def __lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Dict = []
snake_case : List[Any] = 1
while len(lowercase ) < 1e6:
constant.append(str(lowercase ) )
i += 1
snake_case : Tuple = "".join(lowercase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
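# The `_LazyModule` registered above defers importing the torch-backed modeling
# file until one of the exported names (e.g. `ViTMSNModel`) is first accessed.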
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of ``input_num``.

    >>> sum_of_divisors(28)
    28
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
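# A minimal usage sketch (illustrative): instantiating with no arguments keeps the
# defaults above, which appear to mirror the abeja/gpt-neox-japanese-2.7b checkpoint:
#
#     config = GPTNeoXJapaneseConfig()
#     config.hidden_size  # 2560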
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Warn that the wrapped callable is experimental before delegating to it."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
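# A minimal usage sketch (`new_feature` is an illustrative, made-up name):
#
#     @experimental
#     def new_feature():
#         ...
#
# Calling `new_feature()` emits a UserWarning before the wrapped body runs.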
"""Project Euler problem 85: find the grid containing the number of rectangles
closest to two million, and return its area.

A grid measuring a by b contains T(a) * T(b) rectangles, where
T(n) = n * (n + 1) / 2 is the nth triangle number.
"""
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Return the area of the grid whose rectangle count is closest to ``target``."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
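# The original checkpoints store query/key/value as a single fused (3*dim, dim)
# weight and (3*dim,) bias per attention block; the function above slices them
# into the separate q/k/v projections expected by the Hugging Face Swin encoder.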
def read_in_decoder_q_k_v(state_dict, config) -> None:
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False) -> None:
    """Convert an original MaskFormer checkpoint to the Hugging Face format."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
# update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
# load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = "ade" in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert.",
    )
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 98 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the block index in `key` after subtracting `offset` from it."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
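# A worked example for the helper above (the key and offset values are illustrative):
assert (
    replace_key_with_offset("poolformer.encoder.2.1.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "poolformer.encoder.block.1.1.output.conv1.weight"
)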
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
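    # The ladder above could also be driven by a lookup table, e.g.
    # {"s12": ([2, 2, 6, 2], [64, 128, 320, 512], 0.9), ...}; the explicit
    # branches are kept here to mirror the upstream conversion script.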
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 195 |
from manim import *
class CheckpointLoadingScene(Scene):
    def construct(self):
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.25 , width=0.25 )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('CPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('GPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
gpu.move_to([-1, -1, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Model' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
model.move_to([3, -1.0, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
rect.set_stroke(snake_case )
lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case , buff=0.0 )
self.add(snake_case )
model_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case , *snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Loaded Checkpoint' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = fill.copy().set_fill(snake_case , opacity=0.7 )
target.move_to(snake_case )
ckpt_arr.append(snake_case )
lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case , snake_case )
lowercase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case )
lowercase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Disk' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(snake_case , run_time=3 ) , Write(snake_case , run_time=1 ) , Create(snake_case , run_time=1 ) )
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(snake_case , run_time=1.5 ) )
self.play(*snake_case )
self.play(FadeOut(snake_case ) )
lowercase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case , run_time=3 ) )
self.play(
FadeOut(snake_case , snake_case , *snake_case , *snake_case ) , )
self.wait()
| 195 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Base class for text model outputs that also contains a projection of the last hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 365 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase : Tuple = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """Quantize a model with bitsandbytes and dispatch it on the devices given by `device_map`."""
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        bnb_quantization_config.keep_in_fpaa_modules = []
    keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(keep_in_fpaa_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
# convert param to the right dtype
lowerCamelCase__ : Union[str, Any] =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCamelCase__ : Optional[int] =name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCamelCase__ : Dict =getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowerCamelCase ):
param.to(__lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fpaa_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
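# A minimal usage sketch (the model class, config, and checkpoint path are
# illustrative; the entry points match accelerate's public bnb utilities):
#
#     from accelerate import init_empty_weights
#
#     with init_empty_weights():
#         empty_model = MyModel(my_config)  # hypothetical model/config
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     quantized_model = load_and_quantize_model(
#         empty_model,
#         bnb_config,
#         weights_location="/path/to/checkpoint",  # illustrative path
#         device_map="auto",
#     )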
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    """Infer (or validate) the device map used to dispatch the quantized model."""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            "The device_map was not initialized. "
            "Setting device_map to `{'': torch.cuda.current_device()}`."
        )

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace all `torch.nn.Linear` modules (except the skipped ones) with bitsandbytes quantized layers."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Private recursive helper for `replace_with_bnb_layers`."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
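# The recursion above follows a generic "swap leaf modules" pattern; a minimal
# sketch of that pattern, independent of bitsandbytes (the factory callable
# decides what each nn.Linear is replaced with):
def _demo_replace_linear(model: nn.Module, factory) -> nn.Module:
    for name, module in model.named_children():
        if isinstance(module, nn.Linear):
            setattr(model, name, factory(module))  # swap the leaf in place
        elif len(list(module.children())) > 0:
            _demo_replace_linear(module, factory)  # recurse into submodules
    return model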
def get_keys_to_not_convert(model):
    """Find the modules (tied weights, output head) that should stay in full precision."""
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fpaa_statistics):
    """Quantize `param` if needed and offload it (together with its statistics) to disk."""
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
lowerCamelCase__ : Union[str, Any] =param_name
lowerCamelCase__ : Dict =model
if "." in tensor_name:
lowerCamelCase__ : Optional[int] =tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCamelCase__ : Union[str, Any] =getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
lowerCamelCase__ : Union[str, Any] =new_module
lowerCamelCase__ : List[Any] =splits[-1]
# offload weights
lowerCamelCase__ : Optional[Any] =False
offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase , )
else:
offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
offload_weight(__lowerCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase )
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , '''meta''' , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
| 272 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
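# Because everything is re-exported from `.utils`, the legacy and new import
# paths resolve to the same objects (a small sanity-check sketch, kept as
# comments since importing transformers here at module level would be circular):
#
#     from transformers.file_utils import cached_property as legacy
#     from transformers.utils import cached_property as current
#     assert legacy is current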
| 72 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
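# A minimal usage sketch; all values shown are the defaults above:
#
#     config = RetriBertConfig(projection_dim=128)
#     assert config.model_type == "retribert"
#     assert config.hidden_size == 768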
| 72 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure qubit 0 into classical bit 0 and return the counts over 1000 shots."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
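# With no gates applied, qubit 0 stays in |0>, so every shot collapses to "0"
# and the returned counts look like {'0': 1000}.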
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 364 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
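# For example, with the vision extras installed (a sketch):
#
#     cls = image_processor_class_from_name("CLIPImageProcessor")
#     assert cls.__name__ == "CLIPImageProcessor"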
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
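# A minimal usage sketch of the class above (the checkpoint is one public
# example; any repo with an image processor config works):
#
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     inputs = image_processor(images=image, return_tensors="pt")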
| 189 | 0 |
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for matching a set of keywords in a string."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
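# A small worked example (indices are the start positions of each match):
#
#     automaton = Automaton(["he", "she", "his", "hers"])
#     automaton.search_in("ahishers")
#     # -> {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}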
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 210 | 
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[9:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
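# Example invocation of this conversion script (file and folder names are illustrative):
#
#     python convert_deit_timm_to_pytorch.py \
#         --deit_name vit_deit_base_distilled_patch16_224 \
#         --pytorch_dump_folder_path ./deit-base-distilled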
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
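    # Example invocation (script name and output path are illustrative):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-224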
| 143 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
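# A randomized User-Agent header makes the request look like it comes from a
# regular browser rather than a script.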
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """
    Class to interact with an Instagram user's public profile page.
    """

    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 362 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger("transformers.models.speecht5")
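# Copies the original HiFi-GAN generator weights onto the SpeechT5HifiGan
# module (conv_pre / upsampler / resblocks / conv_post). Weight norm must be
# applied before copying so the weight_g / weight_v parameters exist, and
# removed again afterwards.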
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
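    # Example invocation (file paths are illustrative):
    #   python convert_hifigan.py --checkpoint_path generator.ckpt \
    #       --stats_path stats.npy --pytorch_dump_folder_path ./speecht5-hifigan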
| 280 | 0 |
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """
    True when positive_integer has the form 2**e * (2**e - 1), i.e. when
    sqrt(4 * n + 1) is one less than a power of two.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """
    Return the smallest partition value for which the proportion of perfect
    partitions drops below max_proportion.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 255 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
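# Minimal usage sketch (table name and connection string are illustrative):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   SqlDatasetWriter(ds, "my_table", "sqlite:///example.db").write()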
| 89 | 0 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
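# Builds small random configs and inputs so that every GPT-NeoX head can be
# exercised quickly on CPU; the companion test class below drives it.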
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = GPTNeoXModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = GPTNeoXForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = GPTNeoXForQuestionAnswering(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = GPTNeoXForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = GPTNeoXForTokenClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase )
UpperCamelCase_ = output_from_no_past["""hidden_states"""][0]
UpperCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_ = None
self.model_tester.create_and_check_model_as_decoder(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = ids_tensor([1, 1_0] , config.vocab_size )
UpperCamelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase_ = GPTNeoXModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
UpperCamelCase_ = original_model(__UpperCamelCase ).last_hidden_state
UpperCamelCase_ = original_model(__UpperCamelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase_ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase_ = GPTNeoXModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
UpperCamelCase_ = scaled_model(__UpperCamelCase ).last_hidden_state
UpperCamelCase_ = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 261 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
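# Processor that pairs the EnCodec feature extractor (audio side) with a T5
# tokenizer (text prompts), following the usual feature-extractor/tokenizer
# pairing of multimodal processors in transformers.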
class MusicgenProcessor(ProcessorMixin):
    r"""
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def lowerCamelCase_ ( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__UpperCamelCase , language=__UpperCamelCase , no_timestamps=__UpperCamelCase )
def __call__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
UpperCamelCase_ = kwargs.pop("""audio""" , __UpperCamelCase )
UpperCamelCase_ = kwargs.pop("""sampling_rate""" , __UpperCamelCase )
UpperCamelCase_ = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
UpperCamelCase_ = args[0]
UpperCamelCase_ = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
UpperCamelCase_ = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if audio is not None:
UpperCamelCase_ = self.feature_extractor(__UpperCamelCase , *__UpperCamelCase , sampling_rate=__UpperCamelCase , **__UpperCamelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
UpperCamelCase_ = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
UpperCamelCase_ = audio_inputs["""padding_mask"""]
return inputs
def lowerCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = kwargs.pop("""audio""" , __UpperCamelCase )
UpperCamelCase_ = kwargs.pop("""padding_mask""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
UpperCamelCase_ = args[0]
UpperCamelCase_ = args[1:]
if audio_values is not None:
return self._decode_audio(__UpperCamelCase , padding_mask=__UpperCamelCase )
else:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
UpperCamelCase_ = to_numpy(__UpperCamelCase )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = audio_values.shape
if padding_mask is None:
return list(__UpperCamelCase )
UpperCamelCase_ = to_numpy(__UpperCamelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
UpperCamelCase_ = seq_len - padding_mask.shape[-1]
UpperCamelCase_ = 1 - self.feature_extractor.padding_value
UpperCamelCase_ = np.pad(__UpperCamelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__UpperCamelCase )
UpperCamelCase_ = audio_values.tolist()
for i in range(__UpperCamelCase ):
UpperCamelCase_ = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
UpperCamelCase_ = sliced_audio.reshape(__UpperCamelCase , -1 )
return audio_values
| 261 | 1 |
from math import pi, sqrt, tan
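# Closed-form area and surface-area formulas; every function validates that
# its inputs are non-negative before applying the formula.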
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 278 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__snake_case : int =logging.get_logger(__name__)
enable_full_determinism()
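# Three test suites follow, one per UNet2DModel variant: the plain DDPM-style
# UNet, the LDM UNet, and the score-based (NCSN++) UNet.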
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = 4
lowerCAmelCase__ : List[str] = 3
lowerCAmelCase__ : Any = (32, 32)
lowerCAmelCase__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
lowerCAmelCase__ : List[str] = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
@property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : Optional[int] = 4
lowerCAmelCase__ : Optional[Any] = (32, 32)
lowerCAmelCase__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return (4, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
lowerCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int64, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"
@slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 129 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
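# Minimal usage sketch (shapes are illustrative):
#   model = VQModel()
#   sample = torch.randn(1, 3, 32, 32)
#   reconstruction = model(sample).sample  # encode -> quantize -> decode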
| 295 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('labels' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 295 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 230 |
import math
import sys
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = ''''''
try:
with open(__lowerCAmelCase , '''rb''' ) as binary_file:
snake_case__ : int = binary_file.read()
for dat in data:
snake_case__ : Any = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
"""simple docstring"""
snake_case__ : List[str] = {'''0''': '''0''', '''1''': '''1'''}
snake_case__ , snake_case__ : List[Any] = '''''', ''''''
snake_case__ : Tuple = len(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
snake_case__ : Tuple = lexicon[curr_string]
result += last_match_id
snake_case__ : Any = last_match_id + '''0'''
if math.loga(__lowerCAmelCase ).is_integer():
snake_case__ : Tuple = {}
for curr_key in list(__lowerCAmelCase ):
snake_case__ : Union[str, Any] = lexicon.pop(__lowerCAmelCase )
snake_case__ : Optional[Any] = new_lex
snake_case__ : Tuple = last_match_id + '''1'''
index += 1
snake_case__ : Dict = ''''''
return result
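# Illustrative sketch (assumption: this mirrors the lexicon-doubling step in
# the decompressor above): once the next code index reaches a power of two,
# every stored code gains a leading "0" so code widths stay aligned.
_index = 4
_lexicon = {"0": "0", "1": "1"}
if math.log2(_index).is_integer():
    _lexicon = {"0" + key: value for key, value in _lexicon.items()}
assert _lexicon == {"00": "0", "01": "1"}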
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
"""simple docstring"""
snake_case__ : Dict = 8
try:
with open(__lowerCAmelCase , '''wb''' ) as opened_file:
snake_case__ : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
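# Illustrative sketch (not part of the original script): the writer above
# slices the bit string into byte-sized chunks before packing; the same idea
# on a toy string whose bits spell b"AB".
_bits = "0100000101000010"
_chunks = [_bits[i : i + 8] for i in range(0, len(_bits), 8)]
assert bytes(int(_chunk, 2) for _chunk in _chunks) == b"AB"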
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
"""simple docstring"""
snake_case__ : Any = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
snake_case__ : Optional[int] = data_bits[counter:]
snake_case__ : int = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
"""simple docstring"""
snake_case__ : Union[str, Any] = read_file_binary(__lowerCAmelCase )
snake_case__ : List[str] = remove_prefix(__lowerCAmelCase )
snake_case__ : Any = decompress_data(__lowerCAmelCase )
write_file_binary(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 230 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=13 , _lowerCamelCase : Optional[int]=7 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : Tuple=99 , _lowerCamelCase : Tuple=32 , _lowerCamelCase : Tuple=5 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Tuple=37 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=512 , _lowerCamelCase : str=16 , _lowerCamelCase : int=2 , _lowerCamelCase : str=0.0_2 , _lowerCamelCase : int=4 , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_attention_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_choices
def lowercase ( self : Union[str, Any] ):
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_attention_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase ( self : Tuple ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( A_ , unittest.TestCase ):
__a = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase ( self : Dict ):
_snake_case = FlaxAlbertModelTester(self )
@slow
def lowercase ( self : List[Any] ):
for model_class_name in self.all_model_classes:
_snake_case = model_class_name.from_pretrained('''albert-base-v2''' )
_snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowercase ( self : Optional[int] ):
_snake_case = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_snake_case = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_snake_case = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
_snake_case = (1, 11, 768)
self.assertEqual(output.shape , _lowerCamelCase )
_snake_case = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
| 40 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowerCAmelCase__ ( A_ ):
__a = 42
__a = jnp.floataa
__a = True
def lowercase ( self : Tuple ):
super().setup()
_snake_case = nn.Dense(5 , dtype=self.dtype )
def __call__( self : str , *_lowerCamelCase : int , **_lowerCamelCase : Any ):
_snake_case = super().__call__(*_lowerCamelCase , **_lowerCamelCase )
_snake_case = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class lowerCAmelCase__ ( A_ ):
__a = FlaxBigBirdForNaturalQuestionsModule
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> Any:
def cross_entropy(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=None ):
_snake_case = logits.shape[-1]
_snake_case = (labels[..., None] == jnp.arange(__lowerCamelCase )[None]).astype('''f4''' )
_snake_case = jax.nn.log_softmax(__lowerCamelCase , axis=-1 )
_snake_case = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_snake_case = reduction(__lowerCamelCase )
return loss
_snake_case = partial(__lowerCamelCase , reduction=jnp.mean )
_snake_case = cross_entropy(__lowerCamelCase , __lowerCamelCase )
_snake_case = cross_entropy(__lowerCamelCase , __lowerCamelCase )
_snake_case = cross_entropy(__lowerCamelCase , __lowerCamelCase )
return (start_loss + end_loss + pooled_loss) / 3
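# Illustrative sketch (reuses the `jax`/`jnp` imports above): the one-hot
# cross-entropy used inside the loss, checked on a single confident logit row.
_toy_logits = jnp.array([[10.0, -10.0, -10.0]])
_toy_labels = jnp.array([0])
_onehot = (_toy_labels[..., None] == jnp.arange(_toy_logits.shape[-1])[None]).astype("f4")
_toy_loss = -jnp.sum(_onehot * jax.nn.log_softmax(_toy_logits, axis=-1), axis=-1)
assert float(_toy_loss[0]) < 1e-3  # near-zero loss when the prediction is right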
@dataclass
class lowerCAmelCase__ :
__a = "google/bigbird-roberta-base"
__a = 3000
__a = 10500
__a = 128
__a = 3
__a = 1
__a = 5
# tx_args
__a = 3e-5
__a = 0.0
__a = 20000
__a = 0.0095
__a = "bigbird-roberta-natural-questions"
__a = "training-expt"
__a = "data/nq-training.jsonl"
__a = "data/nq-validation.jsonl"
def lowercase ( self : Optional[Any] ):
os.makedirs(self.base_dir , exist_ok=_lowerCamelCase )
_snake_case = os.path.join(self.base_dir , self.save_dir )
_snake_case = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCAmelCase__ :
__a = 42
__a = 4096 # no dynamic padding on TPUs
def __call__( self : Dict , _lowerCamelCase : Any ):
_snake_case = self.collate_fn(_lowerCamelCase )
_snake_case = jax.tree_util.tree_map(_lowerCamelCase , _lowerCamelCase )
return batch
def lowercase ( self : Dict , _lowerCamelCase : str ):
_snake_case , _snake_case = self.fetch_inputs(features['''input_ids'''] )
_snake_case = {
'''input_ids''': jnp.array(_lowerCamelCase , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(_lowerCamelCase , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def lowercase ( self : List[Any] , _lowerCamelCase : list ):
_snake_case = [self._fetch_inputs(_lowerCamelCase ) for ids in input_ids]
return zip(*_lowerCamelCase )
def lowercase ( self : Optional[Any] , _lowerCamelCase : list ):
_snake_case = [1 for _ in range(len(_lowerCamelCase ) )]
while len(_lowerCamelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=None ) -> str:
if seed is not None:
_snake_case = dataset.shuffle(seed=__lowerCamelCase )
for i in range(len(__lowerCamelCase ) // batch_size ):
_snake_case = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(__lowerCamelCase )
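# Illustrative sketch (not part of the training script): the helper above
# yields fixed-size batches and silently drops the ragged tail, e.g.:
def _toy_batches(items, batch_size):
    for i in range(len(items) // batch_size):
        yield items[i * batch_size : (i + 1) * batch_size]

assert list(_toy_batches(list(range(7)), 3)) == [[0, 1, 2], [3, 4, 5]]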
@partial(jax.pmap , axis_name='''batch''' )
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
def loss_fn(__lowerCamelCase : Union[str, Any] ):
_snake_case = model_inputs.pop('''start_labels''' )
_snake_case = model_inputs.pop('''end_labels''' )
_snake_case = model_inputs.pop('''pooled_labels''' )
_snake_case = state.apply_fn(**__lowerCamelCase , params=__lowerCamelCase , dropout_rng=__lowerCamelCase , train=__lowerCamelCase )
_snake_case , _snake_case , _snake_case = outputs
return state.loss_fn(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_snake_case , _snake_case = jax.random.split(__lowerCamelCase )
_snake_case = jax.value_and_grad(__lowerCamelCase )
_snake_case , _snake_case = grad_fn(state.params )
_snake_case = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
_snake_case = jax.lax.pmean(__lowerCamelCase , '''batch''' )
_snake_case = state.apply_gradients(grads=__lowerCamelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def _UpperCAmelCase ( __lowerCamelCase : str , **__lowerCamelCase : List[str] ) -> Any:
_snake_case = model_inputs.pop('''start_labels''' )
_snake_case = model_inputs.pop('''end_labels''' )
_snake_case = model_inputs.pop('''pooled_labels''' )
_snake_case = state.apply_fn(**__lowerCamelCase , params=state.params , train=__lowerCamelCase )
_snake_case , _snake_case , _snake_case = outputs
_snake_case = state.loss_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_snake_case = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class lowerCAmelCase__ ( train_state.TrainState ):
__a = struct.field(pytree_node=A_ )
@dataclass
class lowerCAmelCase__ :
__a = 42
__a = 42
__a = 42
__a = 42
__a = 42
__a = 42
__a = None
def lowercase ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict=None ):
_snake_case = model.params
_snake_case = TrainState.create(
apply_fn=model.__call__ , params=_lowerCamelCase , tx=_lowerCamelCase , loss_fn=_lowerCamelCase , )
if ckpt_dir is not None:
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = restore_checkpoint(_lowerCamelCase , _lowerCamelCase )
_snake_case = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
_snake_case , _snake_case = build_tx(**_lowerCamelCase )
_snake_case = train_state.TrainState(
step=_lowerCamelCase , apply_fn=model.__call__ , params=_lowerCamelCase , tx=_lowerCamelCase , opt_state=_lowerCamelCase , )
_snake_case = args
_snake_case = data_collator
_snake_case = lr
_snake_case = params
_snake_case = jax_utils.replicate(_lowerCamelCase )
return state
def lowercase ( self : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str ):
_snake_case = self.args
_snake_case = len(_lowerCamelCase ) // args.batch_size
_snake_case = jax.random.PRNGKey(0 )
_snake_case = jax.random.split(_lowerCamelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
_snake_case = jnp.array(0 , dtype=jnp.floataa )
_snake_case = get_batched_dataset(_lowerCamelCase , args.batch_size , seed=_lowerCamelCase )
_snake_case = 0
for batch in tqdm(_lowerCamelCase , total=_lowerCamelCase , desc=f'''Running EPOCH-{epoch}''' ):
_snake_case = self.data_collator(_lowerCamelCase )
_snake_case , _snake_case , _snake_case = self.train_step_fn(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
_snake_case = jax_utils.unreplicate(state.step )
_snake_case = running_loss.item() / i
_snake_case = self.scheduler_fn(state_step - 1 )
_snake_case = self.evaluate(_lowerCamelCase , _lowerCamelCase )
_snake_case = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(_lowerCamelCase ) )
self.logger.log(_lowerCamelCase , commit=_lowerCamelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=_lowerCamelCase )
def lowercase ( self : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] ):
_snake_case = get_batched_dataset(_lowerCamelCase , self.args.batch_size )
_snake_case = len(_lowerCamelCase ) // self.args.batch_size
_snake_case = jnp.array(0 , dtype=jnp.floataa )
_snake_case = 0
for batch in tqdm(_lowerCamelCase , total=_lowerCamelCase , desc='''Evaluating ... ''' ):
_snake_case = self.data_collator(_lowerCamelCase )
_snake_case = self.val_step_fn(_lowerCamelCase , **_lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def lowercase ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : Dict ):
_snake_case = jax_utils.unreplicate(_lowerCamelCase )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=''' ... ''' )
self.model_save_fn(_lowerCamelCase , params=state.params )
with open(os.path.join(_lowerCamelCase , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(_lowerCamelCase , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(_lowerCamelCase , '''data_collator.joblib''' ) )
with open(os.path.join(_lowerCamelCase , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , _lowerCamelCase )
print('''DONE''' )
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Tuple:
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... ''' )
with open(os.path.join(__lowerCamelCase , '''flax_model.msgpack''' ) , '''rb''' ) as f:
_snake_case = from_bytes(state.params , f.read() )
with open(os.path.join(__lowerCamelCase , '''opt_state.msgpack''' ) , '''rb''' ) as f:
_snake_case = from_bytes(state.opt_state , f.read() )
_snake_case = joblib.load(os.path.join(__lowerCamelCase , '''args.joblib''' ) )
_snake_case = joblib.load(os.path.join(__lowerCamelCase , '''data_collator.joblib''' ) )
with open(os.path.join(__lowerCamelCase , '''training_state.json''' ) , '''r''' ) as f:
_snake_case = json.load(__lowerCamelCase )
_snake_case = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
_snake_case = num_train_steps - warmup_steps
_snake_case = optax.linear_schedule(init_value=__lowerCamelCase , end_value=__lowerCamelCase , transition_steps=__lowerCamelCase )
_snake_case = optax.linear_schedule(init_value=__lowerCamelCase , end_value=1E-7 , transition_steps=__lowerCamelCase )
_snake_case = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
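# Illustrative sketch (reuses the `optax` import above): a warmup-then-decay
# schedule joined at the warmup boundary, evaluated at a few steps.
_toy_sched = optax.join_schedules(
    schedules=[
        optax.linear_schedule(init_value=0.0, end_value=1.0, transition_steps=10),
        optax.linear_schedule(init_value=1.0, end_value=0.0, transition_steps=10),
    ],
    boundaries=[10],
)
assert float(_toy_sched(0)) == 0.0 and float(_toy_sched(10)) == 1.0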
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ) -> List[str]:
def weight_decay_mask(__lowerCamelCase : List[Any] ):
_snake_case = traverse_util.flatten_dict(__lowerCamelCase )
        _snake_case = {k: (k[-1] != '''bias''' and k[-2:] != ('''LayerNorm''', '''scale''')) for k in params}
return traverse_util.unflatten_dict(__lowerCamelCase )
_snake_case = scheduler_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_snake_case = optax.adamw(learning_rate=__lowerCamelCase , weight_decay=__lowerCamelCase , mask=__lowerCamelCase )
return tx, lr
| 40 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class a__ ( unittest.TestCase ):
lowerCamelCase : List[Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : int =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : Union[str, Any] , a : Any , a : Any ):
"""simple docstring"""
__lowerCamelCase = TextaTextGenerationPipeline(model=a , tokenizer=a )
return generator, ["Something to write", "Something else"]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Any , a : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = generator('''Something there''' )
self.assertEqual(a , [{'''generated_text''': ANY(a )}] )
 # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
__lowerCamelCase = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=a )
self.assertEqual(
a , [
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
] , )
__lowerCamelCase = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=a )
self.assertEqual(
a , [
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
] , )
with self.assertRaises(a ):
generator(4 )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
__lowerCamelCase = generator('''Something there''' , do_sample=a )
self.assertEqual(a , [{'''generated_text''': ''''''}] )
__lowerCamelCase = 3
__lowerCamelCase = generator(
'''Something there''' , num_return_sequences=a , num_beams=a , )
__lowerCamelCase = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(a , a )
__lowerCamelCase = generator('''This is a test''' , do_sample=a , num_return_sequences=2 , return_tensors=a )
self.assertEqual(
a , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
__lowerCamelCase = generator.model.config.eos_token_id
__lowerCamelCase = '''<pad>'''
__lowerCamelCase = generator(
['''This is a test''', '''This is a second test'''] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
__lowerCamelCase = generator('''Something there''' , do_sample=a )
self.assertEqual(a , [{'''generated_text''': ''''''}] )
| 67 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = '''ylacombe/bark-small'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = '''en_speaker_1'''
__lowerCamelCase = '''This is a test string'''
__lowerCamelCase = '''speaker_embeddings_path.json'''
__lowerCamelCase = '''speaker_embeddings'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowerCamelCase = 35
__lowerCamelCase = 2
__lowerCamelCase = 8
__lowerCamelCase = {
'''semantic_prompt''': np.ones(a ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(a , **a )
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
__lowerCamelCase = processor(text=self.input_string )
__lowerCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 67 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : List[Any] = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['LayoutLMv3FeatureExtractor']
__SCREAMING_SNAKE_CASE : List[Any] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 |
import logging
from transformers import PretrainedConfig
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'bertabs'
def __init__( self , lowercase_=30_522 , lowercase_=512 , lowercase_=6 , lowercase_=512 , lowercase_=8 , lowercase_=512 , lowercase_=0.2 , lowercase_=6 , lowercase_=768 , lowercase_=8 , lowercase_=2_048 , lowercase_=0.2 , **lowercase_ , ):
super().__init__(**lowercase_ )
_snake_case : List[Any] = vocab_size
_snake_case : int = max_pos
_snake_case : Tuple = enc_layers
_snake_case : Optional[Any] = enc_hidden_size
_snake_case : Union[str, Any] = enc_heads
_snake_case : str = enc_ff_size
_snake_case : Any = enc_dropout
_snake_case : Tuple = dec_layers
_snake_case : Optional[Any] = dec_hidden_size
_snake_case : Dict = dec_heads
_snake_case : str = dec_ff_size
        _snake_case : List[str] = dec_dropout
| 284 | 1 |
from __future__ import annotations
import math
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
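# Illustrative check (a self-contained copy of the 6k +/- 1 wheel above, since
# the helper's name is mangled in this snippet): only divisors 2, 3 and then
# 5, 7, 11, 13, ... up to sqrt(n) need to be tried.
def _is_prime(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if _is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]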
snake_case : List[str] = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
a :List[Any] = []
for num in range(len(UpperCamelCase_ ) ):
a :List[str] = 0
while 2 * i * i <= odd_composites[num]:
a :int = odd_composites[num] - 2 * i * i
if is_prime(UpperCamelCase_ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCamelCase_ ) == n:
return list_nums
return []
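# Illustrative check (context assumed: Goldbach's other conjecture, which the
# search above hunts for a counterexample to): small odd composites really do
# decompose as prime + 2*i**2.
assert 9 == 7 + 2 * 1**2 and 15 == 13 + 2 * 1**2 and 33 == 31 + 2 * 1**2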
def __lowerCamelCase ( ):
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
import operator as op
snake_case = "scaler.pt"
snake_case = "pytorch_model"
snake_case = "random_states"
snake_case = "optimizer"
snake_case = "scheduler"
snake_case = "pytorch_model.bin"
snake_case = "pytorch_model.bin.index.json"
snake_case = "model.safetensors"
snake_case = "model.safetensors.index.json"
snake_case = "1.10.2"
snake_case = "py38"
snake_case = "4.17.0"
snake_case = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
snake_case = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
snake_case = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
snake_case = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
snake_case = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
snake_case = "2.0.1"
snake_case = ["pdsh", "standard", "openmpi", "mvapich"]
snake_case = ["default", "reduce-overhead", "max-autotune"]
snake_case = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
snake_case = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
snake_case = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
snake_case = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 368 |
from math import sqrt
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i in range(1 , int(sqrt(lowercase ) + 1 ) ):
if n % i == 0 and i != sqrt(lowercase ):
total += i + n // i
elif i == sqrt(lowercase ):
total += i
return total - n
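# Illustrative check (not part of the original; a plain restatement of the
# proper-divisor sum above): 220 and 284 form the classic amicable pair that
# the solution below counts.
assert sum(i for i in range(1, 220) if 220 % i == 0) == 284
assert sum(i for i in range(1, 284) if 284 % i == 0) == 220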
def lowerCamelCase__ ( lowercase = 10000 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = sum(
i
for i in range(1 , lowercase )
if sum_of_divisors(sum_of_divisors(lowercase ) ) == i and sum_of_divisors(lowercase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 319 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self : Union[str, Any], __A : int, __A : Optional[Any]=1_3, __A : Optional[int]=7, __A : int=True, __A : Dict=True, __A : str=True, __A : Dict=True, __A : Optional[Any]=9_9, __A : Tuple=3_2, __A : Optional[int]=2, __A : Any=4, __A : Dict=3_7, __A : Optional[int]="gelu", __A : Any=0.1, __A : List[str]=0.1, __A : Tuple=5_1_2, __A : Dict=1_6, __A : List[Any]=2, __A : int=0.0_2, __A : Dict=False, __A : Optional[Any]=True, __A : Tuple="None", __A : Tuple=3, __A : Dict=4, __A : Optional[Any]=None, ):
UpperCAmelCase : List[str] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : Dict = seq_length
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Optional[Any] = use_input_mask
UpperCAmelCase : Dict = use_token_type_ids
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Tuple = type_vocab_size
UpperCAmelCase : Tuple = type_sequence_label_size
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = num_labels
UpperCAmelCase : str = num_choices
UpperCAmelCase : Optional[Any] = relative_attention
UpperCAmelCase : List[Any] = position_biased_input
UpperCAmelCase : int = pos_att_type
UpperCAmelCase : Union[str, Any] = scope
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCAmelCase : str = DebertaVaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=__A, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any, __A : List[Any], __A : int, __A : List[str], __A : Dict, __A : Optional[int], __A : Dict, __A : Tuple ):
UpperCAmelCase : int = TFDebertaVaModel(config=__A )
UpperCAmelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase : Optional[Any] = [input_ids, input_mask]
UpperCAmelCase : Optional[Any] = model(__A )
UpperCAmelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : str, __A : List[str], __A : List[str], __A : Optional[Any], __A : Tuple, __A : Optional[Any], __A : List[Any], __A : Dict ):
UpperCAmelCase : List[Any] = TFDebertaVaForMaskedLM(config=__A )
UpperCAmelCase : Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Tuple, __A : Optional[int], __A : str, __A : str, __A : List[Any], __A : Optional[int], __A : Tuple, __A : Any ):
UpperCAmelCase : str = self.num_labels
UpperCAmelCase : List[Any] = TFDebertaVaForSequenceClassification(config=__A )
UpperCAmelCase : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : int, __A : Optional[Any], __A : Optional[Any], __A : Any, __A : Any, __A : Tuple, __A : Union[str, Any], __A : int ):
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : Any = TFDebertaVaForTokenClassification(config=__A )
UpperCAmelCase : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Optional[int], __A : int, __A : Tuple, __A : int, __A : Any, __A : Dict, __A : List[Any], __A : Optional[Any] ):
UpperCAmelCase : Tuple = TFDebertaVaForQuestionAnswering(config=__A )
UpperCAmelCase : List[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def __magic_name__ ( self : str ):
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs
UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : int = TFDebertaVaModelTester(self )
UpperCAmelCase : Tuple = ConfigTester(self, config_class=__A, hidden_size=3_7 )
def __magic_name__ ( self : Any ):
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Optional[int] = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(__A )
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def __magic_name__ ( self : Any ):
pass
@slow
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : int = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
UpperCAmelCase : str = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase : int = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCAmelCase : Optional[int] = model(__A, attention_mask=__A )[0]
UpperCAmelCase : int = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4], __A, atol=1E-4 )
| 336 |
def a__ ( UpperCAmelCase : int ) -> int:
UpperCAmelCase : list[list[int]] = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , UpperCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
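# Illustrative check (assumption: the memo table above computes the partition
# function p(m)): p(5) = 7 via 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.
def _partition_count(m: int) -> int:
    ways = [1] + [0] * m
    for part in range(1, m + 1):
        for total in range(part, m + 1):
            ways[total] += ways[total - part]
    return ways[m]

assert _partition_count(5) == 7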
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,unet=_lowerCamelCase ,scheduler=_lowerCamelCase ,safety_checker=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase = "auto" ) -> Optional[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __call__(self ,_lowerCamelCase ,_lowerCamelCase = 512 ,_lowerCamelCase = 512 ,_lowerCamelCase = 50 ,_lowerCamelCase = 7.5 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = 0.0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = 1
elif isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = len(_lowerCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_lowerCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCamelCase ,_lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(_lowerCamelCase )}." )
# get prompt text embeddings
__lowercase = self.tokenizer(
_lowerCamelCase ,padding='''max_length''' ,max_length=self.tokenizer.model_max_length ,return_tensors='''pt''' ,)
__lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__lowercase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__lowercase , __lowercase , __lowercase = text_embeddings.shape
__lowercase = text_embeddings.repeat(1 ,_lowerCamelCase ,1 )
__lowercase = text_embeddings.view(bs_embed * num_images_per_prompt ,_lowerCamelCase ,-1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = 42
if negative_prompt is None:
__lowercase = ['''''']
elif type(_lowerCamelCase ) is not type(_lowerCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCamelCase )} !="
f" {type(_lowerCamelCase )}." )
elif isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [negative_prompt]
elif batch_size != len(_lowerCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_lowerCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
__lowercase = negative_prompt
__lowercase = text_input_ids.shape[-1]
__lowercase = self.tokenizer(
_lowerCamelCase ,padding='''max_length''' ,max_length=_lowerCamelCase ,truncation=_lowerCamelCase ,return_tensors='''pt''' ,)
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase = uncond_embeddings.shape[1]
__lowercase = uncond_embeddings.repeat(_lowerCamelCase ,_lowerCamelCase ,1 )
__lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt ,_lowerCamelCase ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__lowercase = torch.randn(
_lowerCamelCase ,generator=_lowerCamelCase ,device='''cpu''' ,dtype=_lowerCamelCase ).to(self.device )
__lowercase = torch.randn(_lowerCamelCase ,generator=_lowerCamelCase ,device='''cpu''' ,dtype=_lowerCamelCase ).to(
self.device )
else:
__lowercase = torch.randn(
_lowerCamelCase ,generator=_lowerCamelCase ,device=self.device ,dtype=_lowerCamelCase )
__lowercase = torch.randn(_lowerCamelCase ,generator=_lowerCamelCase ,device=self.device ,dtype=_lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
__lowercase = latents_reference.to(self.device )
__lowercase = latents.to(self.device )
                # This is the key part of the pipeline: it tries to ensure that images
                # generated with the same seed but at different sizes come out similar
__lowercase = (latents_shape[3] - latents_shape_reference[3]) // 2
__lowercase = (latents_shape[2] - latents_shape_reference[2]) // 2
__lowercase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__lowercase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__lowercase = 0 if dx < 0 else dx
__lowercase = 0 if dy < 0 else dy
__lowercase = max(-dx ,0 )
__lowercase = max(-dy ,0 )
__lowercase = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)

# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)

# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma

# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
    extra_step_kwargs["eta"] = eta
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
    # expand the latents if we are doing classifier free guidance
    latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

    # predict the noise residual
    noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

    # perform guidance
    if do_classifier_free_guidance:
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

    # compute the previous noisy sample x_t -> x_t-1
    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

    # call the callback, if provided
    if callback is not None and i % callback_steps == 0:
        callback(i, t, latents)
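# Note on the guidance step above: guidance_scale > 1 linearly extrapolates from
# the unconditional toward the text-conditional noise prediction (classifier-free
# guidance); guidance_scale == 1 falls back to the conditional prediction alone.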
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)

# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()

if self.safety_checker is not None:
    safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
        self.device
    )
    image, has_nsfw_concept = self.safety_checker(
        images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
    )
else:
    has_nsfw_concept = None

if output_type == "pil":
    image = self.numpy_to_pil(image)

if not return_dict:
    return (image, has_nsfw_concept)

return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 217 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
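# Sketch of the effect (hypothetical key): "blocks.0.att.time_mix_k" becomes
# "rwkv.blocks.0.attention.time_mix_key" once the rules above are applied.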
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 217 | 1 |
from __future__ import annotations
def binary_search(a_list, item) -> bool:
    """Return True if item is in the sorted a_list, using recursive binary search."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
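# Example (hypothetical values): binary_search([1, 3, 5, 7], 5) returns True and
# binary_search([1, 3, 5, 7], 4) returns False; the input list must be sorted.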
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 295 |
def counting_sort(collection):
    """Sort a collection of integers with counting sort and return the result."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
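# counting_sort runs in O(n + k) time and space, where k is
# coll_max - coll_min + 1; the reversed traversal keeps the sort stable.
# Example: counting_sort([4, 1, 3, 1]) -> [1, 1, 3, 4].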
def counting_sort_string(string):
    """Sort the characters of a string by applying counting_sort to their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 295 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
                         cls_token=cls_token, mask_token=mask_token,
                         tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 48 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None,
                 max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
                 return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False,
                 return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 48 | 1 |
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
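# solve_sub_array is Kadane's algorithm: sum_value[i] holds the best sum of a
# sub-array ending at i, while rear[i] carries the best sum seen so far.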
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 69 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data):
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    def __init__(self):
        # map from node name to the node object
        self.map = {}

    def make_set(self, data):
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data):
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1, node2):
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1, data2):
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}

    def add_node(self, node):
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1, node2, weight):
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self):
        # build a Minimum Spanning Tree of the graph with Kruskal's algorithm
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
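# Kruskal's algorithm costs O(E log E) overall, dominated by the edge sort; the
# union-find with path compression keeps the merge loop close to linear.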
| 55 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def __UpperCamelCase ( _A , _A="robotic cat with wings" , _A=7.5 , _A=50 , _A=1 , _A=42 , ):
lowerCAmelCase_ = torch.Generator(pipeline.device ).manual_seed(_A )
lowerCAmelCase_ = pipeline(
_A , guidance_scale=_A , num_inference_steps=_A , generator=_A , num_images_per_prompt=_A , ).images
lowerCAmelCase_ = int(math.sqrt(_A ) )
lowerCAmelCase_ = image_grid(_A , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 167 |
def hexagonal_numbers(length):
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
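# The n-th hexagonal number is n * (2n - 1), so hexagonal_numbers(5) yields
# [0, 1, 6, 15, 28].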
if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 167 | 1 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left, right, array, target):
    """Find the index of target in array[left:right] by linear scan, or return -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array, target):
    """Iterative ternary search over a sorted array; returns an index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left, right, array, target):
    """Recursive ternary search over a sorted array; returns an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
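# Both searches assume a sorted array and discard roughly a third of the range
# per step, i.e. O(log n) comparisons, switching to a linear scan once the
# remaining range is smaller than `precision`.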
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path):
    """Read the given file and return its content as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon, curr_string, index, last_match_id):
    """Replace curr_string in the lexicon by its two one-bit extensions."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits):
    """Compress the given string of bits using a Lempel-Ziv style lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path, compressed):
    """Prepend a header encoding the source file's length to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path, to_write):
    """Write the given bit string to file_path, padded out to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path, destination_path):
    """Compress source_path and write the result to destination_path."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
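# Overall scheme: an LZ-style dictionary coder over the file's bit string;
# each emitted code is the lexicon entry of the longest previously seen prefix.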
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n_str = str(n)
    return n_str == n_str[::-1]
def solution(n: int = 1_000_000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
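# This is Project Euler problem 36: sum the numbers below the limit that are
# palindromic in both base 10 and base 2 (872187 for the default limit).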
if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
| 368 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor-like list of lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10,
                 hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000,
                 return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : List[Any] = feat_extract_first.save_pretrained(snake_case )[0]
check_json_file_has_correct_format(snake_case )
A__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(snake_case )
A__ : str = feat_extract_first.to_dict()
A__ : Union[str, Any] = feat_extract_second.to_dict()
A__ : List[Any] = feat_extract_first.mel_filters
A__ : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(snake_case , """feat_extract.json""" )
feat_extract_first.to_json_file(snake_case )
A__ : int = self.feature_extraction_class.from_json_file(snake_case )
A__ : Dict = feat_extract_first.to_dict()
A__ : str = feat_extract_second.to_dict()
A__ : str = feat_extract_first.mel_filters
A__ : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
# Test feature size
A__ : Dict = feature_extractor(snake_case , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A__ : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test batched
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : str = np.asarray(snake_case )
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test truncation required
A__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
A__ : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
A__ : str = [np.asarray(snake_case ) for speech_input in speech_inputs_truncated]
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : str = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
import torch
A__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : List[str] = np.random.rand(100 , 32 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A__ : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
A__ : Optional[Any] = self._load_datasamples(1 )
A__ : Union[str, Any] = WhisperFeatureExtractor()
A__ : List[str] = feature_extractor(snake_case , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , snake_case , atol=1e-4 ) )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Union[str, Any] = self._load_datasamples(1 )[0]
A__ : Any = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
A__ : str = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=snake_case )[0]
self.assertTrue(np.all(np.mean(snake_case ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case ) - 1 ) < 1e-3 ) )
| 296 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A__ = logging.get_logger(__name__)
A__ = {"""vocab_file""": """spiece.model"""}
A__ = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class CpmTokenizer(PreTrainedTokenizer):
def __init__( self , _snake_case , _snake_case=False , _snake_case=True , _snake_case=False , _snake_case="<s>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<sep>" , _snake_case="<pad>" , _snake_case="<cls>" , _snake_case="<mask>" , _snake_case=["<eop>", "<eod>"] , _snake_case = None , **_snake_case , ):
"""simple docstring"""
_lowerCAmelCase = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
_lowerCAmelCase = 3
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = remove_space
_lowerCAmelCase = keep_accents
_lowerCAmelCase = vocab_file
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_lowerCAmelCase = jieba
_lowerCAmelCase = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case ( self ):
"""simple docstring"""
return len(self.sp_model )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , _snake_case ):
"""simple docstring"""
if self.remove_space:
_lowerCAmelCase = """ """.join(inputs.strip().split() )
else:
_lowerCAmelCase = inputs
_lowerCAmelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase = unicodedata.normalize("""NFKD""" , _snake_case )
_lowerCAmelCase = """""".join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
_lowerCAmelCase = outputs.lower()
return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def snake_case ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
def snake_case ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = super()._decode(*_snake_case , **_snake_case )
_lowerCAmelCase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 82 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim=768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 82 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
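# Usage sketch (hypothetical configs): pairing two pretrained configs,
# e.g. EncoderDecoderConfig.from_encoder_decoder_configs(bert_cfg, gpt2_cfg),
# marks the decoder with is_decoder=True and add_cross_attention=True.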
| 6 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
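# Sketch (hypothetical key): with expert_idx=7, "layers.3.moe_layer.experts.0.fc1.weight"
# is renamed to "layers.3.ffn.experts.expert_7.fc1.weight"; the fc1 rule then
# skips it because the key now contains "experts".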
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name=WEIGHTS_NAME):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 6 | 1 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 309 |
"""simple docstring"""
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
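# Example: kth_permutation(0, 4) -> [0, 1, 2, 3] and kth_permutation(1, 4) ->
# [0, 1, 3, 2], i.e. permutations are indexed in lexicographic order.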
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 278 |
from math import factorial
def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
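
# Sanity check (added for illustration): the result satisfies Pascal's rule,
# C(n, k) == C(n - 1, k - 1) + C(n - 1, k), for example:
#
#     assert combinations(10, 3) == combinations(9, 2) + combinations(9, 3)  # 120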
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
| 278 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
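
# Illustrative usage (assumes network access to the `bert-base-uncased`
# checkpoint listed above; added here, not part of the original module):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     enc = tokenizer("Hello world", "Second segment")
#     # enc["token_type_ids"] marks the first segment with 0s and the second
#     # with 1s, exactly as `create_token_type_ids_from_sequences` describes.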
 | 42 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device=None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output, timestep, sample, return_dict=True):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multi-step (Adams-Bashforth) combination of the running values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample, *args, **kwargs):
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
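
# A minimal denoising-loop sketch using the scheduler above. `model` is a
# hypothetical stand-in for a trained noise-prediction network and is not
# defined in this file:
#
#     scheduler = IPNDMScheduler()
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample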
| 197 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/fnet-base""": 512,
"""google/fnet-large""": 512,
}
SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 366 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass the variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass the variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
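
# Roughly, `is_safetensors_compatible` passes when every PyTorch `.bin` weight
# has a `.safetensors` counterpart for the relevant components. A simplified
# sketch of that idea, added for illustration (not the diffusers code itself):
#
#     def has_safetensors_counterparts(filenames):
#         def stems(ext):
#             return {
#                 f.rsplit(".", 1)[0].replace("pytorch_model", "model")
#                 for f in filenames
#                 if f.endswith(ext)
#             }
#         return stems(".bin") <= stems(".safetensors")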
| 245 | 0 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 1_2345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
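
# Note (added for clarity): the candidates k = (i**2 - 1) / 4 are integers only
# for odd i, giving k = 2, 6, 12, 20, ... for i = 3, 5, 7, 9, ...; `solution()`
# returns the first such k where the running fraction of perfect partitions
# drops below `max_proportion`.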
if __name__ == "__main__":
print(f"""{solution() = }""")
| 58 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
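
# Illustrative use of `Tracker`, added for clarity (assumes torchvision is
# installed; not part of the original script): run one forward pass and
# collect the leaf modules that hold parameters, in call order.
#
#     import torchvision
#     leaves = Tracker(torchvision.models.resnet18())(torch.randn(1, 3, 224, 224)).parametrized
#     # `leaves` now lists the Conv2d/BatchNorm2d/Linear modules in call order.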
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Transfer the weights of `self.src` to `self.dest` by performing a
        # forward pass with `x` and pairing up the traced operations.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 58 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 350 |
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been visited yet.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
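
# The capacity matrix below is the classic CLRS flow-network example; the
# expected maximum flow from node 0 to node 5 is 23.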
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 197 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
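
# For reference (added note): the "fixed_small" variance asserted in
# `test_variance` is the DDPM posterior variance, in the paper's notation
#
#     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
#
# which is 0 at t = 0 and approaches beta_end (0.02 here) near t = 999.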
| 63 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_PROCESSOR_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 1 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
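
# With the defaults this is Project Euler problem 71: the reduced fraction
# immediately to the left of 3/7 with denominator <= 10**6 is 428570/999997,
# so `solution()` returns 428570.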
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 350 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_with_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
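
# Bottom-up trace (added for illustration) with array = [1, 2, 5], target = 5:
# dp = [1, 1, 2, 3, 5, 9], so all three implementations count 9 ordered
# combinations (for example 5, 1+2+2, 2+2+1, 1+1+1+2, ...).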
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
 | 317 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
 | 6 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model.encode(_snake_case ).latent_dist
__a = dist.sample(generator=_snake_case )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__a = sample[0, -1, -3:, -3:].flatten().cpu()
__a = torch.tensor(_snake_case )
__a = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(_snake_case , _snake_case , atol=_snake_case ) | 6 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the logistic sigmoid of `vector`, applied element-wise."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
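    # A minimal usage sketch (assumes the corrected `sigmoid` above; the sample
    # values are illustrative):
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~ [0.2689 0.5 0.7311]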
| 357 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under `fnc` between `x_start` and `x_end` with `steps` trapezoids."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        '''Integrand for the demo: f(x) = x^3 + x^2.'''
        return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
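    # Convergence note: because each trapezoid contributes abs(fx_next + fx),
    # the loop above estimates the unsigned area between the curve and the
    # x axis (about 312.7 for x^3 + x^2 on [-5, 5]); without the abs() it
    # would converge to the signed integral, 250/3.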
| 216 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(args):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 287 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(cls._token)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
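# For reference, a minimal sketch of the `update_from_string` behavior the
# first test above exercises (values are coerced to the type of the existing
# attribute; the numbers are illustrative):
#
#     c = GPTaConfig()
#     c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
#     assert c.n_embd == 10 and c.resid_pdrop == 0.2 and c.scale_attn_weights is False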
| 287 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # The dump only preserved the value 1024 here; the attribute name below
        # is a best-guess restoration.
        tokenizer.model_max_length = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def lowerCamelCase ( self : Tuple ):
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase_ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Dict = tempfile.mkdtemp()
lowerCAmelCase_ : Optional[int] = " He is very happy, UNwant\u00E9d,running"
lowerCAmelCase_ : int = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCAmelCase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(a_ )
lowerCAmelCase_ : List[Any] = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
shutil.rmtree(a_ )
lowerCAmelCase_ : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Dict = tempfile.mkdtemp()
lowerCAmelCase_ : List[Any] = " He is very happy, UNwant\u00E9d,running"
lowerCAmelCase_ : Optional[Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCAmelCase_ : int = chr(0xE0_07 )
additional_special_tokens.append(a_ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCAmelCase_ : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCAmelCase_ : Optional[int] = tokenizer.__class__.from_pretrained(a_ )
lowerCAmelCase_ : Optional[int] = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
self.assertIn(a_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(a_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a_ )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : str = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.get_clean_sequence(a_ )
# a special token for Canine can be defined as follows:
lowerCAmelCase_ : str = 0xE0_05
lowerCAmelCase_ : Optional[Any] = chr(a_ )
tokenizer.add_special_tokens({"cls_token": special_token} )
lowerCAmelCase_ : Tuple = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(len(a_ ) , 1 )
lowerCAmelCase_ : Optional[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a_ )
lowerCAmelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCAmelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCAmelCase_ : Tuple = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(a_ , input_encoded + special_token_id )
lowerCAmelCase_ : List[Any] = tokenizer.decode(a_ , skip_special_tokens=a_ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Any = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ : Tuple = chr(0xE0_05 )
lowerCAmelCase_ : List[Any] = chr(0xE0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(a_ )
lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize(a_ )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(token_a[0] , a_ )
self.assertEqual(token_a[0] , a_ )
@require_tokenizers
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
lowerCAmelCase_ : str = 0xE0_06
lowerCAmelCase_ : Optional[int] = chr(a_ )
lowerCAmelCase_ : Optional[Any] = AddedToken(a_ , lstrip=a_ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(a_ )
tokenizer.from_pretrained(a_ )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCAmelCase_ : Optional[Any] = json.load(a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCAmelCase_ : Any = json.load(a_ )
# a special token for Canine can be defined as follows:
lowerCAmelCase_ : List[Any] = 0xE0_06
lowerCAmelCase_ : int = chr(a_ )
lowerCAmelCase_ : List[Any] = [new_token_a]
lowerCAmelCase_ : Tuple = [new_token_a]
with open(os.path.join(a_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : Any = tokenizer_class.from_pretrained(a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCAmelCase_ : List[Any] = 0xE0_07
lowerCAmelCase_ : Optional[Any] = chr(a_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Optional[Any] = [AddedToken(a_ , lstrip=a_ )]
lowerCAmelCase_ : Optional[int] = tokenizer_class.from_pretrained(
a_ , additional_special_tokens=a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : List[Any] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ : Any = "hello world"
if self.space_between_special_tokens:
lowerCAmelCase_ : Union[str, Any] = "[CLS] hello world [SEP]"
else:
lowerCAmelCase_ : Optional[Any] = input
lowerCAmelCase_ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCAmelCase_ : int = tokenizer.decode(a_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(a_ , [output, output.lower()] )
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ : List[Any] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCAmelCase_ : Any = "a"
lowerCAmelCase_ : Union[str, Any] = ord(a_ )
for attr in attributes_list:
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [] )
lowerCAmelCase_ : Any = 0xE0_06
lowerCAmelCase_ : Tuple = chr(a_ )
setattr(a_ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCamelCase ( self : str ):
pass
def lowerCamelCase ( self : List[str] ):
pass
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : Optional[Any] ):
pass
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : Tuple ):
pass
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : Tuple ):
pass
| 161 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4,
        max_2d_position_embeddings=1024, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
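# A minimal usage sketch (the override values are illustrative; `LiltModel`
# lives elsewhere in the library and is not imported in this file):
#
#     config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#     model = LiltModel(config)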
| 161 | 1 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor `table` into lower * upper."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
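    # A minimal usage sketch (assumes the corrected function above; the matrix
    # is illustrative). Multiplying the factors reconstructs the input:
    lower, upper = lower_upper_decomposition(np.array([[4.0, 3.0], [6.0, 3.0]]))
    print(lower)  # [[1.  0. ], [1.5 1. ]]
    print(upper)  # [[ 4.   3. ], [ 0.  -1.5]]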
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
        decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
        decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True,
        layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
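# A minimal usage sketch (the override values are illustrative):
#
#     config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=4)
#     assert config.hidden_size == 256  # resolved through `attribute_map`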
| 343 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 353 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        # Temporarily raise the noise level when sigma falls inside [s_min, s_max]
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
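# How this scheduler is driven — a sketch of the Heun-style predictor/corrector
# loop, modeled on the KarrasVe pipeline in diffusers (`unet` and the sigma
# scaling below are assumptions of that pipeline, not defined in this file):
#
#     scheduler.set_timesteps(num_inference_steps)
#     sample = randn_tensor(shape) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#         step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#         if sigma_prev != 0:  # second-order correction
#             model_output = (sigma_prev / 2) * unet((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
#             step_output = scheduler.step_correct(
#                 model_output, sigma_hat, sigma_prev, sample_hat,
#                 step_output.prev_sample, step_output.derivative,
#             )
#         sample = step_output.prev_sample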
| 28 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
UpperCAmelCase : Union[str, Any] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 40 |
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """Return True if no letter repeats in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
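    # e.g. is_isogram("Uncopyrightable") -> True, is_isogram("letter") -> False
    # (illustrative examples)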
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between metric length units (names or symbols)."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
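    # A worked example (assumes the corrected function above):
    # "kilometer" -> exponent 3, "meter" -> exponent 0, so 4 * 10**3.
    print(length_conversion(4, "kilometer", "meter"))  # 4000.0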
| 87 | 0 |
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """Sort non-negative integers in place with the gravity/bead method."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
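    # One more quick check (illustrative), including a zero element:
    assert bead_sort([0, 3, 1]) == [0, 1, 3]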
| 247 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 247 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352 | """simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class EncodecFeatureExtractor ( SequenceFeatureExtractor ):
    model_input_names = ['input_values', 'padding_mask']
    def __init__( self ,feature_size = 1 ,sampling_rate = 2_4000 ,padding_value = 0.0 ,chunk_length_s = None ,overlap = None ,**kwargs ,) -> Any:
        '''simple docstring'''
        super().__init__(feature_size=feature_size ,sampling_rate=sampling_rate ,padding_value=padding_value ,**kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
    def chunk_length( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
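    # Worked example for the two properties above: with chunk_length_s=1.0 at the default
    # 24 kHz sampling rate and overlap=0.01, chunk_length == int(1.0 * 24_000) == 24_000
    # samples and chunk_stride == max(1, int((1.0 - 0.01) * 24_000)) == 23_760 samples.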
    def __call__( self ,raw_audio ,padding = None ,truncation = False ,max_length = None ,return_tensors = None ,sampling_rate = None ,) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio ,dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio ,np.ndarray ):
            raw_audio = np.asarray(raw_audio ,dtype=np.float32 )
        elif isinstance(raw_audio ,np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
        else:
            padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values ,max_length=max_length ,truncation=truncation ,padding=padding ,return_attention_mask=padding ,)
            if padding:
                padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask' )
        input_values = []
        for example in padded_inputs.pop('input_values' ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
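# Minimal usage sketch for the feature extractor above (values illustrative; assumes a
# 24 kHz mono input, matching the defaults in __init__):
#
#   import numpy as np
#   extractor = EncodecFeatureExtractor(chunk_length_s=1.0, overlap=0.01)
#   audio = np.random.randn(36_000).astype(np.float32)        # 1.5 s of mono audio
#   batch = extractor(audio, sampling_rate=24_000, return_tensors="np")
#   print(batch["input_values"].shape)                        # (batch, channels, padded length)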
| 321 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_A : str ='''pytorch_model.bin'''
_A : Dict ='''pytorch_model.bin.index.json'''
_A : Optional[int] ='''adapter_config.json'''
_A : List[Any] ='''adapter_model.bin'''
_A : Tuple ='''adapter_model.safetensors'''
_A : Dict ='''tf_model.h5'''
_A : Optional[int] ='''tf_model.h5.index.json'''
_A : Optional[int] ='''model.ckpt'''
_A : int ='''flax_model.msgpack'''
_A : Tuple ='''flax_model.msgpack.index.json'''
_A : Any ='''model.safetensors'''
_A : Any ='''model.safetensors.index.json'''
_A : Any ='''config.json'''
FEATURE_EXTRACTOR_NAME ='''preprocessor_config.json'''
IMAGE_PROCESSOR_NAME =FEATURE_EXTRACTOR_NAME
_A : Dict ='''generation_config.json'''
_A : str ='''modelcard.json'''
SENTENCEPIECE_UNDERLINE ='''▁'''
SPIECE_UNDERLINE =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_A : Tuple =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_A : Optional[Any] =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_A : Optional[Any] =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version (min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = f'''This example requires a minimum version of {min_version},'''
        error_message += f''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 41 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) )
class MobileNetVaModelTester :
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=13 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Optional[Any]=0.25 , UpperCamelCase__: int=8 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=1_024 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple="relu6" , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Union[str, Any] = min_depth
lowerCamelCase__ : Optional[Any] = tf_padding
lowerCamelCase__ : str = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: str ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = MobileNetVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Tuple = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: Dict ):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
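# These tests are run with pytest; the slow integration test above additionally needs the
# RUN_SLOW flag (test path illustrative):
#
#   RUN_SLOW=1 pytest tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py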
| 41 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
a_ = DummyObject  # alias used as the metaclass of the placeholder classes below
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :List[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :List[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Optional[int] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :List[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Union[str, Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Optional[int] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Dict = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Optional[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Union[str, Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Dict = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Union[str, Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
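# Behavior sketch for the placeholders above: the DummyObject metaclass routes attribute
# access and instantiation through requires_backends, so touching any of these classes
# without sentencepiece installed raises immediately (class name hypothetical):
#
#   XLMRobertaTokenizer()
#   # ImportError: XLMRobertaTokenizer requires the SentencePiece library but it was not
#   # found in your environment. ...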
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial ( ) -> None:
    '''simple docstring'''
    plt.scatter(X , y , color="""red""" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 0 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class ReturnType ( enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline ( Pipeline ):
    return_name = 'generated'
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = {}
if truncation is not None:
__UpperCamelCase = truncation
__UpperCamelCase = generate_kwargs
__UpperCamelCase = {}
if return_tensors is not None and return_type is None:
__UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
__UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
__UpperCamelCase = self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return True
def UpperCAmelCase ( self , *__UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , __UpperCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
__UpperCamelCase = ([prefix + arg for arg in args[0]],)
__UpperCamelCase = True
elif isinstance(args[0] , __UpperCAmelCase ):
__UpperCamelCase = (prefix + args[0],)
__UpperCamelCase = False
else:
raise ValueError(
F' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' )
__UpperCamelCase = self.tokenizer(*__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
if (
isinstance(args[0] , __UpperCAmelCase )
and all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for el in args[0] )
and all(len(__UpperCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._parse_and_tokenize(__UpperCAmelCase , truncation=__UpperCAmelCase , **__UpperCAmelCase )
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
if self.framework == "pt":
__UpperCamelCase , __UpperCamelCase = model_inputs['input_ids'].shape
elif self.framework == "tf":
__UpperCamelCase , __UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy()
__UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length )
__UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(__UpperCAmelCase , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
__UpperCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
__UpperCamelCase = output_ids.reshape(__UpperCAmelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__UpperCamelCase = tf.reshape(__UpperCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=ReturnType.TEXT , __UpperCAmelCase=False ):
'''simple docstring'''
__UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__UpperCamelCase = {F'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
__UpperCamelCase = {
F'{self.return_name}_text': self.tokenizer.decode(
__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
}
records.append(__UpperCAmelCase )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline ( Text2TextGenerationPipeline ):
    return_name = 'summary'
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if max_length < min_length:
logger.warning(F'Your min_length={min_length} must be inferior than your max_length={max_length}.' )
if input_length < max_length:
logger.warning(
F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline ( Text2TextGenerationPipeline ):
    return_name = 'translation'
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCAmelCase ( self , *__UpperCAmelCase , __UpperCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
if getattr(self.tokenizer , '_build_translation_inputs' , __UpperCAmelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCAmelCase , return_tensors=self.framework , truncation=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase )
else:
return super()._parse_and_tokenize(*__UpperCAmelCase , truncation=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = super()._sanitize_parameters(**__UpperCAmelCase )
if src_lang is not None:
__UpperCamelCase = src_lang
if tgt_lang is not None:
__UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__UpperCamelCase = kwargs.get('task' , self.task )
__UpperCamelCase = task.split('_' )
if task and len(__UpperCAmelCase ) == 4:
# translation, XX, to YY
__UpperCamelCase = items[1]
__UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
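# Hedged usage sketch for the pipelines above (model id illustrative; a task string of the
# form "translation_xx_to_yy" exercises the task.split('_') parsing in the translation class):
#
#   from transformers import pipeline
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   print(translator("How old are you?", max_length=40))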
| 316 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , ) # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
                    [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
                    [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 316 | 1 |
from math import sqrt
def sum_of_divisors ( n : int ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution ( limit : int = 1_0000 ) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1, limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
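# Worked example: sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so the
# amicable pair (220, 284) passes the filter in solution() and contributes 504 to the total.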
| 369 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check ( pkg, hint=None ):
    '''simple docstring'''
    require_version(deps[pkg], hint )
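# Usage sketch for the helper above (hint string illustrative):
#
#   dep_version_check("tqdm")                             # checks against the pinned deps table
#   dep_version_check("numpy", "pip install -U numpy")    # custom hint surfaced on failure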
| 35 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Union[str, Any] = torch.device('cpu')
def prepare_img ( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output ( swiftformer_name ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key ( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys ( state_dict ):
    '''simple docstring'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
        if ".dwconv" in k:
            k_new = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
        if ".Proj." in k:
            k_new = k_new.replace('''.Proj.''' , '''.proj.''' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
        if "network" in k_new:
            ls = k_new.split('''.''' )
            if ls[2].isdigit():
                k_new = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
            else:
                k_new = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint ( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    '''simple docstring'''
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='''cpu''' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='''cpu''' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    inputs = processor(images=image , return_tensors='''pt''' )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['''pixel_values'''] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
A__ : Dict = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
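# Example invocation (script filename and checkpoint path are illustrative):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth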
| 207 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone ( PreTrainedModel ,BackboneMixin ):
    """simple docstring"""
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__( self, config, **kwargs ):
        '''simple docstring'''
        requires_backends(self, '''timm''' )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
        if config.backbone not in timm.list_models():
            raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
        if hasattr(config, '''out_features''' ) and config.out_features is not None:
            raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
        pretrained = getattr(config, '''use_pretrained_backbone''', None )
        if pretrained is None:
            raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, '''out_indices''', None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['''module''']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
@classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, *model_args, **kwargs ):
        '''simple docstring'''
        requires_backends(cls, ['''vision''', '''timm'''] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop('''config''', TimmBackboneConfig() )
        use_timm = kwargs.pop('''use_timm_backbone''', True )
        if not use_timm:
            raise ValueError('''use_timm_backbone must be True for timm backbones''' )
        num_channels = kwargs.pop('''num_channels''', config.num_channels )
        features_only = kwargs.pop('''features_only''', config.features_only )
        use_pretrained_backbone = kwargs.pop('''use_pretrained_backbone''', config.use_pretrained_backbone )
        out_indices = kwargs.pop('''out_indices''', config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs )
    def _init_weights( self, module ):
'''simple docstring'''
pass
    def forward( self : int, pixel_values : int, output_attentions : Optional[int]=None, output_hidden_states : List[Any]=None, return_dict : int=None, **kwargs : Optional[int] ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values, **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None )
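# Illustrative usage sketch (not part of the original file; the backbone name and
# config kwargs below are assumptions for demonstration only):
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(2, 3))
#   backbone = TimmBackbone(config)
#   outputs = backbone(pixel_values)  # BackboneOutput with one feature map per entry in out_indices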
| 207 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n            This includes models such as gpt2, causal variations of bert,\n            causal versions of t5, and more (the full list can be found\n            in the AutoModelForCausalLM documentation here:\n            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
    def _info( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute( self : List[Any] , input_texts : List[str] , model_id : Optional[Any] , batch_size : int = 16 , add_start_token : bool = True , device : Dict=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="pt" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 209 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig( datasets.BuilderConfig ):
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self : Optional[int] ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self : List[Any] , dl_manager : List[Any] ):
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , "rb" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self : int , pa_table : pa.Table ):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self : int , files : Optional[Any] ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , "rb" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                    raise
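# Usage sketch (assumes this builder is what backs `load_dataset("parquet", ...)`;
# the file paths below are placeholders):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train.parquet"}, columns=["id", "text"])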
| 209 | 1 |
def snake_to_camel_case( input_str : str , use_pascal : bool = False ):
    """simple docstring"""
    if not isinstance(input_str , str ):
        msg = F'''Expected string as input, found {type(input_str )}'''
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = F'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
        raise ValueError(msg )
    words = input_str.split('''_''' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '''''' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 94 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
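# Usage sketch (illustrative only; the checkpoint name and inputs are assumptions):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   batch = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")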
| 94 | 1 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()
def download_command_factory(args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand( BaseTransformersCLICommand ):
'''simple docstring'''
    @staticmethod
    def register_subcommand( parser ):
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ):
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''' ) is not None:
            import safetensors
            safetensors_version = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F"""\t{accelerate_config}"""
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU''' ) )
        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': F"""{safetensors_version}""",
            '''Accelerate version''': F"""{accelerate_version}""",
            '''Accelerate config''': F"""{accelerate_config_str}""",
            '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
            '''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
            '''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
            '''Jax version''': F"""{jax_version}""",
            '''JaxLib version''': F"""{jaxlib_version}""",
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ):
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 370 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
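# Usage sketch (assumes this module serves as a torch.hub entry point, i.e. hubconf.py):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")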
| 73 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
UpperCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__( self: Union[str, Any] , image_processor: str=None , tokenizer: Optional[int]=None , **kwargs: Dict ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("""gpt2""" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self: Tuple , text: Optional[int]=None , images: List[str]=None , return_tensors: Any=None , **kwargs: Any ):
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self: Union[str, Any] , sequences: int ):
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs , char_scores = self._decode_helper(char_preds , """char""" )
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds , """bpe""" )
        wp_strs , wp_scores = self._decode_helper(wp_preds , """wp""" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["""generated_text"""] = final_strs
        out["""scores"""] = final_scores
        out["""char_preds"""] = char_strs
        out["""bpe_preds"""] = bpe_strs
        out["""wp_preds"""] = wp_strs
        return out
    def _decode_helper( self: Optional[int] , pred_logits: str , format: Optional[Any] ):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = """[s]"""
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = """#"""
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 1_02
            eos_str = """[SEP]"""
        else:
            raise ValueError(F'Format {format} is not supported.' )
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self: List[Any] , sequences: Union[str, Any] ):
        decode_strs = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self: int , sequences: Tuple ):
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self: str , sequences: Dict ):
        decode_strs = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
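# Decoding flow sketch (illustrative; attribute names on the model output are an
# assumption): given the (char, bpe, wp) logits produced by an MGP-STR model,
# batch_decode picks, per image, whichever head's decoded string carries the highest
# cumulative-softmax confidence:
#
#   out = processor.batch_decode(model_outputs.logits)
#   out["generated_text"], out["scores"]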
| 12 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask( size , overlap_pixels , remove_borders=[] ):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 2_55
    mask = np.pad(mask , mode="""linear_ramp""" , pad_width=overlap_pixels , end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp( n , smallest , largest ):
    """simple docstring"""
    return max(smallest , min(n , largest ) )
def clamp_rect( rect , min , max ):
    """simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect( rect , overlap , image_size ):
    """simple docstring"""
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
    return rect
def squeeze_tile( tile , original_image , original_slice , slice_x ):
    """simple docstring"""
    result = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
    return result
def unsqueeze_tile( tile , original_image_slice ):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def next_divisible( n , d ):
    """simple docstring"""
    divisor = n % d
    return n - divisor
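# Worked example of the tiling arithmetic used below (a sketch): for a 256x256 input
# with tile_size=128, __call__ computes ceil(256/128) = 2 tiles per axis (4 in total);
# each tile is upscaled 4x and pasted back with a linear-ramp alpha mask whose width is
# tile_border * 4 pixels, so neighboring tiles blend instead of leaving visible seams.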
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
    def __init__( self : List[Any] , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , low_res_scheduler : DDPMScheduler , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , max_noise_level : int = 3_50 , ) -> int:
        super().__init__(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , max_noise_level=max_noise_level , )
    def _process_tile( self : List[Any] , original_image_slice : List[Any] , x : List[Any] , y : Dict , tile_size : List[Any] , tile_border : int , image : Optional[Any] , final_image : Any , **kwargs : str ) -> int:
        torch.manual_seed(0 )
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect , tile_border , image.size )
        tile = image.crop(crop_rect_with_overlap )
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0 , translated_slice_x )
        to_input = squeeze_tile(tile , image , original_image_slice , translated_slice_x )
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline , self ).__call__(image=to_input , **kwargs ).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        upscaled_tile = unsqueeze_tile(upscaled_tile , original_image_slice )
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        remove_borders = []
        if x == 0:
            remove_borders.append("""l""" )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("""r""" )
        if y == 0:
            remove_borders.append("""t""" )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("""b""" )
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=remove_borders ) , mode="""L""" , )
        final_image.paste(
            upscaled_tile , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , transparency_mask )
@torch.no_grad()
    def __call__( self : Union[str, Any] , prompt : Union[str, List[str]] , image : Union[PIL.Image.Image, List[PIL.Image.Image]] , num_inference_steps : int = 75 , guidance_scale : float = 9.0 , noise_level : int = 50 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , tile_size : int = 1_28 , tile_border : int = 32 , original_image_slice : int = 32 , ) -> str:
        final_image = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
        tcx = math.ceil(image.size[0] / tile_size )
        tcy = math.ceil(image.size[1] / tile_size )
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy ):
            for x in range(tcx ):
                self._process_tile(
                    original_image_slice , x , y , tile_size , tile_border , image , final_image , prompt=prompt , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , noise_level=noise_level , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , )
                current_count += 1
                if callback is not None:
                    callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
        return final_image
def main():
    """simple docstring"""
    model_id = """stabilityai/stable-diffusion-x4-upscaler"""
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision="""fp16""" , torch_dtype=torch.float16 )
    pipe = pipe.to("""cuda""" )
    image = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
    def callback(obj ):
        print(f"progress: {obj['progress']:.4f}" )
        obj["image"].save("""diffusers_library_progress.jpg""" )
    final_image = pipe(image=image , prompt="""Black font, white background, vector""" , noise_level=40 , callback=callback )
    final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
| 70 | 0 |
"""simple docstring"""
def check_bouncy(n : int ):
    if not isinstance(n , int ):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(n )
    sorted_str_n = ''.join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent : float = 99 ):
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
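# Worked example (added for illustration): 101 is bouncy because sorted("101") gives
# "011" != "101" and the descending order "110" != "101"; monotone numbers such as
# 123 (increasing) or 321 (decreasing) are not bouncy.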
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 309 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item : str , main_target : str ):
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
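# Example (added for illustration): evaluate("abd", "abc") counts position-wise
# matches and returns ("abd", 2.0), since two of the three characters agree.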
def crossover(parent_1 : str , parent_2 : str ):
    random_slice = random.randint(0 , len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child : str , genes : list[str] ):
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select(parent_1 : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] , ):
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
def basic(target : str , genes : list[str] , debug : bool = True ):
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation , population , target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 309 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    '''configuration_groupvit''': [
        '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''GroupViTConfig''',
        '''GroupViTOnnxConfig''',
        '''GroupViTTextConfig''',
        '''GroupViTVisionConfig''',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_groupvit'''] = [
        '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GroupViTModel''',
        '''GroupViTPreTrainedModel''',
        '''GroupViTTextModel''',
        '''GroupViTVisionModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
        '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFGroupViTModel''',
        '''TFGroupViTPreTrainedModel''',
        '''TFGroupViTTextModel''',
        '''TFGroupViTVisionModel''',
    ]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 74 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( TestCasePlus ):
    def _create_dummy_data(self : Optional[Any] , data_dir : str ):
        os.makedirs(data_dir , exist_ok=True )
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , "w" ) as f:
                    f.write(content )
def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = os.path.join(__a , "output" )
UpperCAmelCase_ = os.path.join(__a , "data" )
self._create_dummy_data(data_dir=__a )
UpperCAmelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , "metrics.json" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
    @require_torch_gpu
    def test_finetune_gpu(self : Optional[int] ):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    def test_finetune_multigpu(self : Dict ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self : Optional[int] ):
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self : Any ):
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 1 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key( k ):
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus( tf_weights ,cfg_updates ):
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v ,dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    mapping["""encoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    mapping["""decoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def _lowercase ( __A="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
__UpperCamelCase = tf.train.list_variables(__A )
__UpperCamelCase = {}
__UpperCamelCase = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A ,desc="""converting tf checkpoint to dict""" ):
__UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCamelCase = tf.train.load_variable(__A ,__A )
__UpperCamelCase = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path ,save_dir ):
    '''simple docstring'''
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["""max_position_embeddings"""]
    tok = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" ,model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights ,cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(sd ,Path(save_dir ) / """pytorch_model.bin""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
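# Invocation sketch (the script file name is an assumption for illustration):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 <save_dir>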
| 243 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase__ :
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='''Translation''' , init=False , repr=False)
def __call__( self ) -> Optional[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __lowerCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase__ :
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='''TranslationVariableLanguages''' , init=False , repr=False)
    def __post_init__( self ) -> Dict:
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self ) -> Any:
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ) -> Any:
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
def __lowerCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
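# Usage sketch (mirrors the behavior implemented above; values are illustrative):
#   Translation(languages=["en", "fr"]) encodes examples like {"en": "the cat", "fr": "le chat"}.
#   TranslationVariableLanguages(languages=["de", "en", "fr"]).encode_example(
#       {"en": "the cat", "fr": ["la chatte", "le chat"]})
#   -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}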
| 243 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class LiltConfig( PretrainedConfig ):
    model_type = "lilt"
    def __init__(self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1_0_2_4 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
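# Usage sketch (illustrative):
#   config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#   config.hidden_size  # 768 by default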
| 318 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer(self ):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def get_tokenizer(self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self ):
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 1_1_0_3 )
    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
    def test_mask_tokens_rust_pegasus(self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens(self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings(self ):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6_1_0_3
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_0_3
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_0_2_4
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation(self ):
        src_texts = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1_0_2_4)
        assert batch.attention_mask.shape == (2, 1_0_2_4)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
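# Hedged usage sketch (not part of the test suite; the checkpoint download is
# assumed): it mirrors the expected ids asserted above, where <mask_1> and
# <mask_2> encode to ids 2 and 3 and </s> (id 1) is appended automatically.
def _pegasus_mask_token_demo():
    from transformers import PegasusTokenizer

    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
    ids = tokenizer("<mask_1> To ensure a <mask_2> flow of bank resolutions.").input_ids
    assert ids[0] == 2  # <mask_1>
    assert 3 in ids  # <mask_2>
    assert ids[-1] == tokenizer.eos_token_id  # </s>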
| 318 | 1 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
a__: str = logging.get_logger(__name__)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE = UNet2DModel
__SCREAMING_SNAKE_CASE = '''sample'''
@property
def UpperCamelCase ( self ):
A__ = 4
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
A__ = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
def UpperCamelCase ( self ):
A__ = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
A__ = self.dummy_input
return init_dict, inputs_dict
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE = UNet2DModel
__SCREAMING_SNAKE_CASE = '''sample'''
@property
def UpperCamelCase ( self ):
A__ = 4
A__ = 4
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
A__ = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
return (4, 32, 32)
@property
def UpperCamelCase ( self ):
return (4, 32, 32)
def UpperCamelCase ( self ):
A__ = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
A__ = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self ):
        A__ , A__ = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''',output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ),0 )
model.to(__lowerCamelCase )
A__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''','''This test is supposed to run on GPU''' )
def UpperCamelCase ( self ):
        A__ , A__ = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''',output_loading_info=__lowerCamelCase )
model.to(__lowerCamelCase )
A__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''','''This test is supposed to run on GPU''' )
def UpperCamelCase ( self ):
        # by default, model loading will use accelerate as `low_cpu_mem_usage=True`
        A__ , A__ = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''',output_loading_info=__lowerCamelCase )
model_accelerate.to(__lowerCamelCase )
model_accelerate.eval()
A__ = torch.randn(
1,model_accelerate.config.in_channels,model_accelerate.config.sample_size,model_accelerate.config.sample_size,generator=torch.manual_seed(0 ),)
A__ = noise.to(__lowerCamelCase )
A__ = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
A__ = model_accelerate(__lowerCamelCase,__lowerCamelCase )['''sample''']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
        A__ , A__ = UNet2DModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''',output_loading_info=__lowerCamelCase,low_cpu_mem_usage=__lowerCamelCase )
model_normal_load.to(__lowerCamelCase )
model_normal_load.eval()
A__ = model_normal_load(__lowerCamelCase,__lowerCamelCase )['''sample''']
assert torch_all_close(__lowerCamelCase,__lowerCamelCase,rtol=1E-3 )
def UpperCamelCase ( self ):
        A__ = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(__lowerCamelCase )
A__ = torch.randn(
1,model.config.in_channels,model.config.sample_size,model.config.sample_size,generator=torch.manual_seed(0 ),)
A__ = noise.to(__lowerCamelCase )
A__ = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
with torch.no_grad():
A__ = model(__lowerCamelCase,__lowerCamelCase ).sample
A__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A__ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase,__lowerCamelCase,rtol=1E-3 ) )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE = UNet2DModel
__SCREAMING_SNAKE_CASE = '''sample'''
@property
def UpperCamelCase ( self,__lowerCamelCase=(32, 32) ):
A__ = 4
A__ = 3
A__ = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
        A__ = torch.tensor(batch_size * [10] ).to(dtype=torch.int64,device=__lowerCamelCase )  # integer timestep dtype (int64 assumed)
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
def UpperCamelCase ( self ):
A__ = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
A__ = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCamelCase ( self ):
        A__ , A__ = UNet2DModel.from_pretrained('''google/ncsnpp-celebahq-256''',output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ),0 )
model.to(__lowerCamelCase )
A__ = self.dummy_input
A__ = floats_tensor((4, 3) + (256, 256) ).to(__lowerCamelCase )
A__ = noise
A__ = model(**__lowerCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def UpperCamelCase ( self ):
        A__ = UNet2DModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(__lowerCamelCase )
A__ = 4
A__ = 3
A__ = (256, 256)
A__ = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
A__ = torch.tensor(batch_size * [1E-4] ).to(__lowerCamelCase )
with torch.no_grad():
A__ = model(__lowerCamelCase,__lowerCamelCase ).sample
A__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A__ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase,__lowerCamelCase,rtol=1E-2 ) )
def UpperCamelCase ( self ):
        A__ = UNet2DModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(__lowerCamelCase )
A__ = 4
A__ = 3
A__ = (32, 32)
A__ = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
A__ = torch.tensor(batch_size * [1E-4] ).to(__lowerCamelCase )
with torch.no_grad():
A__ = model(__lowerCamelCase,__lowerCamelCase ).sample
A__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A__ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase,__lowerCamelCase,rtol=1E-2 ) )
def UpperCamelCase ( self ):
# not required for this model
pass
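# Hedged standalone sketch (not part of the tests above): build the same tiny
# UNet2DModel config the first tester uses and run one denoising forward pass.
def _unet2d_forward_demo():
    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        attention_head_dim=3,
    )
    noise = torch.randn(4, 3, 32, 32)
    out = model(noise, timestep=torch.tensor([10])).sample
    assert out.shape == noise.shape  # the model predicts a tensor shaped like its input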
| 355 |
import os
import sys
a__: int = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(a__)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a__: Union[str, Any] = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase__( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] )->Any:
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase__( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Any )->Dict:
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase__( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] )->int:
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase__( *UpperCamelCase__ : int , **UpperCamelCase__ : Union[str, Any] )->Any:
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase__( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] )->int:
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase__( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any )->Optional[Any]:
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase__( *UpperCamelCase__ : Any , **UpperCamelCase__ : Union[str, Any] )->Tuple:
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
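# Hedged usage sketch of the underlying Auto* API these thin wrappers delegate
# to (the checkpoint name is illustrative and is downloaded on first use):
def _auto_api_demo():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModel.from_pretrained("bert-base-uncased")
    inputs = tokenizer("Hello world", return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # (batch, sequence_length, hidden_size)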
| 39 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class a :
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: str ):
"""simple docstring"""
A__ = data
A__ = None
A__ = None
def _snake_case ( ):
print("""\n********Press N to stop entering at any point of time********\n""" )
A__ = input("""Enter the value of the root node: """ ).strip().lower()
A__ = queue.Queue()
A__ = TreeNode(int(UpperCAmelCase_ ) )
q.put(UpperCAmelCase_ )
while not q.empty():
A__ = q.get()
A__ = F"""Enter the left node of {node_found.data}: """
A__ = input(UpperCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
A__ = TreeNode(int(UpperCAmelCase_ ) )
A__ = left_node
q.put(UpperCAmelCase_ )
A__ = F"""Enter the right node of {node_found.data}: """
A__ = input(UpperCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
A__ = TreeNode(int(UpperCAmelCase_ ) )
A__ = right_node
q.put(UpperCAmelCase_ )
raise
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
A__ = queue.Queue()
q.put(UpperCAmelCase_ )
while not q.empty():
A__ = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
A__ = queue.Queue()
q.put(UpperCAmelCase_ )
while not q.empty():
A__ = []
while not q.empty():
A__ = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
A__ = []
A__ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(UpperCAmelCase_ )
A__ = n.left
# end of while means current node doesn't have left child
A__ = stack.pop()
# start to traverse its right child
A__ = n.right
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
A__ = []
A__ = node
while n or stack:
while n:
stack.append(UpperCAmelCase_ )
A__ = n.left
A__ = stack.pop()
print(n.data , end=""",""" )
A__ = n.right
def _snake_case ( UpperCAmelCase_ : TreeNode ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not node:
return
A__ , A__ = [], []
A__ = node
stacka.append(UpperCAmelCase_ )
while stacka: # to find the reversed order of post order, store it in stack2
A__ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(UpperCAmelCase_ )
    while stacka: # popping from stack2 yields the post order
print(stacka.pop().data , end=""",""" )
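# Worked trace of the two-stack trick above (illustrative): each node popped
# from the first stack pushes its children and is collected by the second
# stack, which therefore holds the reverse of the post-order sequence.
# For a root 1 with left child 2 and right child 3:
#   first stack pops: 1, 3, 2    second stack collects: [1, 3, 2]
#   popping the second stack prints: 2, 3, 1  (post-order)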
def _snake_case ( UpperCAmelCase_ : str = "" , UpperCAmelCase_ : Dict=50 , UpperCAmelCase_ : str="*" ):
if not s:
return "\n" + width * char
A__ , A__ = divmod(width - len(UpperCAmelCase_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
SCREAMING_SNAKE_CASE_ : TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 5_0 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 335 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
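# Illustrative, simplified sketch of what ROUGE-2 measures: bigram overlap
# between candidate and reference (set-based here; the real `rouge_score`
# package works on counts and supports stemming).
def _rouge2_f_sketch(candidate: str, reference: str) -> float:
    def bigrams(text: str) -> set:
        tokens = text.lower().split()
        return {tuple(tokens[i : i + 2]) for i in range(len(tokens) - 1)}

    cand, ref = bigrams(candidate), bigrams(reference)
    overlap = len(cand & ref)
    if overlap == 0:
        return 0.0
    precision, recall = overlap / len(cand), overlap / len(ref)
    return 2 * precision * recall / (precision + recall)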
| 335 | 1 |
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
return abs(UpperCamelCase_ ) if a == 0 else greatest_common_divisor(b % a , UpperCamelCase_ )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
    while y: # --> when y becomes 0, the loop terminates and x is returned as the final GCD.
UpperCamelCase_ , UpperCamelCase_ = y, x % y
return abs(UpperCamelCase_ )
def lowerCAmelCase_ ( ) -> Any:
try:
UpperCamelCase_ = input("Enter two integers separated by comma (,): " ).split("," )
UpperCamelCase_ = int(nums[0] )
UpperCamelCase_ = int(nums[1] )
print(
F'''greatest_common_divisor({num_a}, {num_a}) = '''
F'''{greatest_common_divisor(UpperCamelCase_ , UpperCamelCase_ )}''' )
print(F'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCamelCase_ , UpperCamelCase_ )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("Wrong input" )
if __name__ == "__main__":
main()
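# Worked trace (illustrative) of the iterative Euclidean algorithm on (24, 40):
# (x, y): (24, 40) -> (40, 24) -> (24, 16) -> (16, 8) -> (8, 0), so the GCD is 8.
# The recursive variant reaches the same value via gcd(16, 24), gcd(8, 16), gcd(0, 8).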
| 328 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_UpperCAmelCase = {'UserAgent': UserAgent().random}
def lowerCAmelCase_ ( UpperCamelCase_ ) -> dict:
UpperCamelCase_ = script.contents[0]
UpperCamelCase_ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _UpperCamelCase :
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = f'''https://www.instagram.com/{username}/'''
UpperCamelCase_ = self.get_json()
def lowercase ( self: Union[str, Any] ) -> dict:
"""simple docstring"""
UpperCamelCase_ = requests.get(self.url , headers=_SCREAMING_SNAKE_CASE ).text
UpperCamelCase_ = BeautifulSoup(_SCREAMING_SNAKE_CASE , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: Tuple ) -> str:
"""simple docstring"""
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: List[Any] ) -> str:
"""simple docstring"""
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def lowercase ( self: int ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def lowercase ( self: Optional[int] ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def lowercase ( self: List[str] ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowerCAmelCase_ ( UpperCamelCase_ = "github" ) -> None:
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCamelCase_ = InstagramUser(UpperCamelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = InstagramUser('github')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 328 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A :
def __init__(self , lowerCAmelCase = "cpu" , lowerCAmelCase = "openai/clip-vit-large-patch14" ):
__lowercase= device
__lowercase= CLIPTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowercase= [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
__lowercase= [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
__lowercase= torchvision.transforms.Normalize(self.image_mean , self.image_std )
__lowercase= torchvision.transforms.Resize(2_2_4 )
__lowercase= torchvision.transforms.CenterCrop(2_2_4 )
def _A (self , lowerCAmelCase ):
__lowercase= self.resize(SCREAMING_SNAKE_CASE_ )
__lowercase= self.center_crop(SCREAMING_SNAKE_CASE_ )
__lowercase= self.normalize(SCREAMING_SNAKE_CASE_ )
return images
def __call__(self , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ):
__lowercase= self.tokenizer(text=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowercase= self.preprocess_img(SCREAMING_SNAKE_CASE_ )
__lowercase= {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class A ( nn.Module ):
def __init__(self , lowerCAmelCase=1_0 , lowerCAmelCase=0.01 , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="image" , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , ):
super().__init__()
__lowercase= None
__lowercase= device if device else get_device()
if vqgan:
__lowercase= vqgan
else:
__lowercase= load_vqgan(self.device , conf_path=SCREAMING_SNAKE_CASE_ , ckpt_path=SCREAMING_SNAKE_CASE_ )
self.vqgan.eval()
if clip:
__lowercase= clip
else:
__lowercase= CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
__lowercase= ProcessorGradientFlow(device=self.device )
__lowercase= iterations
__lowercase= lr
__lowercase= log
__lowercase= make_grid
__lowercase= return_val
__lowercase= quantize
__lowercase= self.vqgan.decoder.z_shape
def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=5 , lowerCAmelCase=True ):
__lowercase= []
if output_path is None:
__lowercase= './animation.gif'
if input_path is None:
__lowercase= self.save_path
__lowercase= sorted(glob(input_path + '/*' ) )
if not len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
__lowercase= total_duration / len(SCREAMING_SNAKE_CASE_ )
__lowercase= [frame_duration] * len(SCREAMING_SNAKE_CASE_ )
if extend_frames:
__lowercase= 1.5
__lowercase= 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(SCREAMING_SNAKE_CASE_ ) )
imageio.mimsave(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , duration=SCREAMING_SNAKE_CASE_ )
print(f'gif saved to {output_path}' )
def _A (self , lowerCAmelCase=None , lowerCAmelCase=None ):
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
__lowercase= preprocess(Image.open(SCREAMING_SNAKE_CASE_ ) , target_image_size=2_5_6 ).to(self.device )
__lowercase= preprocess_vqgan(SCREAMING_SNAKE_CASE_ )
__lowercase, *__lowercase= self.vqgan.encode(SCREAMING_SNAKE_CASE_ )
return z
def _A (self , lowerCAmelCase ):
__lowercase= self.latent.detach().requires_grad_()
__lowercase= base_latent + transform_vector
if self.quantize:
__lowercase, *__lowercase= self.vqgan.quantize(SCREAMING_SNAKE_CASE_ )
else:
__lowercase= trans_latent
return self.vqgan.decode(SCREAMING_SNAKE_CASE_ )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ):
__lowercase= self.clip_preprocessor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE_ )
__lowercase= self.clip(**SCREAMING_SNAKE_CASE_ )
__lowercase= clip_outputs.logits_per_image
if weights is not None:
__lowercase= similarity_logits * weights
return similarity_logits.sum()
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self._get_clip_similarity(pos_prompts['prompts'] , SCREAMING_SNAKE_CASE_ , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
__lowercase= self._get_clip_similarity(neg_prompts['prompts'] , SCREAMING_SNAKE_CASE_ , weights=neg_prompts['weights'] )
else:
__lowercase= torch.tensor([1] , device=self.device )
__lowercase= -torch.log(SCREAMING_SNAKE_CASE_ ) + torch.log(SCREAMING_SNAKE_CASE_ )
return loss
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= torch.randn_like(self.latent , requires_grad=SCREAMING_SNAKE_CASE_ , device=self.device )
__lowercase= torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__lowercase= self._add_vector(SCREAMING_SNAKE_CASE_ )
__lowercase= loop_post_process(SCREAMING_SNAKE_CASE_ )
__lowercase= self._get_CLIP_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print('CLIP loss' , SCREAMING_SNAKE_CASE_ )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
wandb.init(reinit=SCREAMING_SNAKE_CASE_ , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
__lowercase= Image.open(SCREAMING_SNAKE_CASE_ )
__lowercase= image.resize((2_5_6, 2_5_6) )
wandb.log('Original Image' , wandb.Image(SCREAMING_SNAKE_CASE_ ) )
def _A (self , lowerCAmelCase ):
if not prompts:
return []
__lowercase= []
__lowercase= []
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowercase= [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(SCREAMING_SNAKE_CASE_ , (tuple, list) ):
__lowercase= prompt[0]
__lowercase= float(prompt[1] )
elif ":" in prompt:
__lowercase, __lowercase= prompt.split(':' )
__lowercase= float(SCREAMING_SNAKE_CASE_ )
else:
__lowercase= prompt
__lowercase= 1.0
processed_prompts.append(SCREAMING_SNAKE_CASE_ )
weights.append(SCREAMING_SNAKE_CASE_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(SCREAMING_SNAKE_CASE_ , device=self.device ),
}
def _A (self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=None , ):
if image_path:
__lowercase= self._get_latent(SCREAMING_SNAKE_CASE_ )
else:
__lowercase= torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert pos_prompts, "You must provide at least one positive prompt."
__lowercase= self.process_prompts(SCREAMING_SNAKE_CASE_ )
__lowercase= self.process_prompts(SCREAMING_SNAKE_CASE_ )
if save_final and save_path is None:
__lowercase= os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
else:
__lowercase= save_path + '_' + get_timestamp()
os.makedirs(SCREAMING_SNAKE_CASE_ )
__lowercase= save_path
__lowercase= self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(SCREAMING_SNAKE_CASE_ ) )
__lowercase= loop_post_process(SCREAMING_SNAKE_CASE_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
if show_intermediate:
show_pil(SCREAMING_SNAKE_CASE_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'Image': wandb.Image(SCREAMING_SNAKE_CASE_ )} )
if show_final:
show_pil(SCREAMING_SNAKE_CASE_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}_final.png' ) )
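# Hedged standalone sketch (hypothetical helper) of the prompt format
# process_prompts accepts: "|"-separated prompts, each optionally weighted
# as "text:weight" (the weight defaults to 1.0).
def _parse_prompts_sketch(spec: str):
    prompts, weights = [], []
    for part in spec.split("|"):
        text, _, weight = part.strip().partition(":")
        prompts.append(text.strip())
        weights.append(float(weight) if weight else 1.0)
    return prompts, weights

# _parse_prompts_sketch("a smiling face:2|short hair")
# returns (["a smiling face", "short hair"], [2.0, 1.0])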
| 295 |
from __future__ import annotations
from typing import Generic, TypeVar
lowerCamelCase_ = TypeVar('''T''')
class __A( Generic[T] ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __A( Generic[T] ):
"""simple docstring"""
def __init__(self ):
# map from node name to the node object
UpperCamelCase__ = {}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# merge 2 disjoint sets
self.link(self.find_set(SCREAMING_SNAKE_CASE_ ) , self.find_set(SCREAMING_SNAKE_CASE_ ) )
class __A( Generic[T] ):
"""simple docstring"""
def __init__(self ):
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# add an edge with the given weight
self.add_node(SCREAMING_SNAKE_CASE_ )
self.add_node(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def UpperCAmelCase_ (self ):
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(SCREAMING_SNAKE_CASE_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
disjoint_set.union(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return graph
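# Compact standalone sketch of the same Kruskal procedure (a hypothetical
# helper using a plain dict for union-find) to make the algorithm concrete:
def _kruskal_sketch(nodes, edges):
    """edges: iterable of (u, v, weight) tuples; returns the MST edge list."""
    parent = {node: node for node in nodes}

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path compression
            x = parent[x]
        return x

    mst = []
    for u, v, weight in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # joining two components cannot create a cycle
            parent[root_u] = root_v
            mst.append((u, v, weight))
    return mst

# _kruskal_sketch("abc", [("a", "b", 3), ("b", "c", 1), ("a", "c", 2)])
# returns [("b", "c", 1), ("a", "c", 2)]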
| 244 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ) ->str:
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : str = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : List[Any] = num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Any = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) ->str:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = LlamaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Optional[int] = LlamaModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[Any] = LlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Any:
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : str = LlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE : str = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : str = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE : str = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )['''hidden_states'''][0]
SCREAMING_SNAKE_CASE : Dict = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )['''hidden_states'''][0]
# select random slice
SCREAMING_SNAKE_CASE : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : str = (LlamaForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : Optional[Any] = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = 3
SCREAMING_SNAKE_CASE : List[str] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Tuple = input_ids.ne(1 ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = 3
SCREAMING_SNAKE_CASE : int = '''single_label_classification'''
SCREAMING_SNAKE_CASE : Optional[int] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.ne(1 ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Optional[int] = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = 3
SCREAMING_SNAKE_CASE : int = '''multi_label_classification'''
SCREAMING_SNAKE_CASE : Tuple = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.ne(1 ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE : int = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = ids_tensor([1, 10] , config.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE : Any = LlamaModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = original_model(lowerCamelCase_ ).last_hidden_state
SCREAMING_SNAKE_CASE : Optional[Any] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE : Optional[int] = {'''type''': scaling_type, '''factor''': 1_0.0}
SCREAMING_SNAKE_CASE : Optional[Any] = LlamaModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
SCREAMING_SNAKE_CASE : Any = scaled_model(lowerCamelCase_ ).last_hidden_state
SCREAMING_SNAKE_CASE : Any = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-5 ) )
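# Hedged sketch of the RoPE-scaling knob exercised above: LlamaConfig accepts a
# `rope_scaling` dict with a "type" ("linear" or "dynamic") and a float
# "factor" greater than 1 that stretches the usable context length.
def _rope_scaling_config_demo():
    return LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})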
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE : int = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
SCREAMING_SNAKE_CASE : Optional[int] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE : int = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
SCREAMING_SNAKE_CASE : List[Any] = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
SCREAMING_SNAKE_CASE : Any = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE : str = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE : List[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
SCREAMING_SNAKE_CASE : int = model(torch.tensor(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[int] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi'''
SCREAMING_SNAKE_CASE : Any = '''Simply put, the theory of relativity states that '''
SCREAMING_SNAKE_CASE : Optional[int] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
SCREAMING_SNAKE_CASE : Any = tokenizer.encode(lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : int = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=lowerCamelCase_ )
# greedy generation outputs
SCREAMING_SNAKE_CASE : Optional[int] = model.generate(lowerCamelCase_ , max_new_tokens=64 , top_p=lowerCamelCase_ , temperature=1 , do_sample=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
}
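# --- Illustrative cross-check (not part of the original module) ---
# For binary labels, MCC can be computed directly from confusion counts; this
# mirrors what `sklearn.metrics.matthews_corrcoef` does and is a quick
# pure-Python sanity check for the metric above.
def _mcc_binary(references, predictions):
    tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)
    tn = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 0)
    fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)
    fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)
    denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    return (tp * tn - fp * fn) / denom if denom else 0.0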
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings still possible from this (days, absent, late) state."""
    # if we are late three days in a row, or absent twice, this string is invalid
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of 'prize strings' over the given number of days."""
    return _calculate(days, absent=0, late=0)
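# --- Illustrative alternative (not part of the original file) ---
# The module-level `cache` dict above is hand-rolled memoization; the same
# recursion can be memoized with functools.lru_cache and no global state.
from functools import lru_cache


@lru_cache(maxsize=None)
def _calculate_cached(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        _calculate_cached(days - 1, absent, late + 1)  # late today
        + _calculate_cached(days - 1, absent + 1, 0)   # absent today
        + _calculate_cached(days - 1, absent, 0)       # on time today
    )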
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
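# --- Illustrative sketch (not part of the original module) ---
# The DummyObject metaclass pattern above makes any use of the class raise a
# helpful error when the backend is missing; a stripped-down version of the
# same idea (simplified, not the actual diffusers implementation):
class _MiniDummyMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the `onnx` backend, which is not installed.")


class _MiniOnnxModel(metaclass=_MiniDummyMeta):
    pass  # instantiating this class raises the ImportError above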
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
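# --- Illustrative usage (not part of the original module) ---
# A sketch of driving the arguments class above; it assumes TensorFlow is
# installed, that the base dataclass accepts `models`/`batch_sizes`/
# `sequence_lengths` fields, and uses a placeholder model name.
if is_tf_available():
    _demo_args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128], eager_mode=True
    )
    print(_demo_args.n_gpu, _demo_args.is_gpu)  # how many GPUs the chosen strategy will see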
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
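# --- Illustrative usage (not part of the original module) ---
# Building the config above; the attribute_map makes `hidden_size` an alias
# for `d_model` and `num_attention_heads` for `encoder_attention_heads`.
if __name__ == "__main__":
    cfg = DetrConfig(num_queries=50)
    assert cfg.hidden_size == cfg.d_model == 256
    assert cfg.num_attention_heads == cfg.encoder_attention_heads == 8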
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
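# --- Illustrative sketch (not part of the original module) ---
# What the _LazyModule indirection above buys: submodules are imported only
# on first attribute access. A stripped-down version of the idea (simplified,
# not the actual transformers implementation):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)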
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    config_parameters_to_change = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
    key_parameters_to_change = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
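# --- Illustrative sketch (not part of the original script) ---
# The weight-renaming loop above maps old key prefixes to new ones; the same
# idea in miniature, on a toy state dict (keys are placeholders):
_toy_state = {"mid.block.weight": 1, "downsample_blocks.0.bias": 2}
_toy_map = {"mid": "mid_block", "downsample_blocks": "down_blocks"}
_renamed = {
    ".".join([_toy_map.get(k.split(".")[0], k.split(".")[0])] + k.split(".")[1:]): v
    for k, v in _toy_state.items()
}
assert "mid_block.block.weight" in _renamed and "down_blocks.0.bias" in _renamed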
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Dict =10
A__ : str =self.get_scheduler_config()
A__ : Any =self.scheduler_classes[0](**lowerCAmelCase_ )
scheduler.set_timesteps(lowerCAmelCase_ )
A__ : List[Any] =scheduler.timesteps[0]
A__ : Union[str, Any] =scheduler.timesteps[1]
A__ : Optional[int] =self.dummy_sample
A__ : Union[str, Any] =0.1 * sample
A__ : Dict =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
A__ : int =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =self.scheduler_classes[0]
A__ : Dict =self.get_scheduler_config()
A__ : Any =scheduler_class(**lowerCAmelCase_ )
A__ : int =1
scheduler.set_timesteps(lowerCAmelCase_ )
A__ : Any =scheduler.timesteps
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[Any] =self.dummy_model()
A__ : Optional[int] =self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCAmelCase_ ):
# 1. scale model input
A__ : Optional[Any] =scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict noise residual
A__ : Union[str, Any] =model(lowerCAmelCase_ , lowerCAmelCase_ )
# 3. predict previous sample x_t-1
A__ : Tuple =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
A__ : Dict =pred_prev_sample
A__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase_ ) )
A__ : Optional[Any] =torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =self.scheduler_classes[0]
A__ : Dict =self.get_scheduler_config()
A__ : Tuple =scheduler_class(**lowerCAmelCase_ )
A__ : Tuple =[1_06, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
A__ : List[Any] =scheduler.timesteps
A__ : Optional[Any] =torch.manual_seed(0 )
A__ : int =self.dummy_model()
A__ : int =self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A__ : Any =scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict noise residual
A__ : List[str] =model(lowerCAmelCase_ , lowerCAmelCase_ )
# 3. predict previous sample x_t-1
A__ : Dict =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
A__ : Union[str, Any] =pred_prev_sample
A__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase_ ) )
A__ : List[Any] =torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
    def test_custom_timesteps_must_be_descending(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_cannot_pass_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_below_train_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"{solution() = }")
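# --- Illustrative cross-check (not part of the original file) ---
# Even Fibonacci numbers are every third term and satisfy E(k) = 4*E(k-1) + E(k-2),
# so the sum can be accumulated without building the full sequence.
def even_fib_sum(limit: int) -> int:
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total


assert even_fib_sum(4000000) == solution()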
"""simple docstring"""
class CircularQueue:
    """Fixed-capacity circular queue implemented on top of a Python list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
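# --- Illustrative usage (not part of the original file) ---
# Demonstrates the wrap-around behaviour of the fixed-size buffer above.
queue = CircularQueue(3)
queue.enqueue("a").enqueue("b").enqueue("c")
assert queue.dequeue() == "a"  # front advances, slot is freed
queue.enqueue("d")             # rear wraps around to index 0
assert len(queue) == 3 and queue.first() == "b"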
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid: C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
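# --- Illustrative cross-check (not part of the original file) ---
# The closed form above is the central binomial coefficient C(2n, n);
# math.comb computes it with exact integer arithmetic.
from math import comb

assert solution(20) == comb(40, 20)  # 137846528820 lattice paths on a 20x20 grid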
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__UpperCamelCase : int = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the rectified linear unit elementwise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
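# --- Illustrative cross-check (not part of the original file) ---
# ReLU is just an elementwise max(0, x):
assert list(relu([-1, 0, 5])) == [max(0, x) for x in [-1, 0, 5]]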
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :List[Any] = self.get_dummy_components()
UpperCamelCase__ :Any = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
UpperCamelCase__ :str = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = output.rgb, output.depth
UpperCamelCase__ :List[Any] = rgb[0, -3:, -3:, -1]
UpperCamelCase__ :Optional[int] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCamelCase__ :Any = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
UpperCamelCase__ :Optional[int] = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_dummy_components()
UpperCamelCase__ :Optional[Any] = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
UpperCamelCase__ :Dict = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :str = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = 3 * [inputs['''prompt''']]
# forward
UpperCamelCase__ :int = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = output.rgb, output.depth
UpperCamelCase__ :Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1]
UpperCamelCase__ :Union[str, Any] = depth_slice_a[0, -3:, -1]
UpperCamelCase__ :Dict = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = 3 * [inputs.pop('''prompt''' )]
UpperCamelCase__ :str = ldmad_pipe.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :Optional[Any] = text_inputs['''input_ids'''].to(UpperCamelCase_ )
UpperCamelCase__ :str = ldmad_pipe.text_encoder(UpperCamelCase_ )[0]
UpperCamelCase__ :Optional[int] = prompt_embeds
# forward
UpperCamelCase__ :Tuple = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :int = output.rgb, output.depth
UpperCamelCase__ :List[Any] = rgb_slice_a[0, -3:, -3:, -1]
UpperCamelCase__ :List[Any] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :List[str] = self.get_dummy_components()
UpperCamelCase__ :Tuple = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
UpperCamelCase__ :List[str] = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
UpperCamelCase__ :int = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = '''french fries'''
UpperCamelCase__ :List[Any] = ldmad_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = output.rgb, output.depth
UpperCamelCase__ :Tuple = rgb[0, -3:, -3:, -1]
UpperCamelCase__ :Any = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCamelCase__ :Union[str, Any] = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
UpperCamelCase__ :List[str] = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # the original sliced `rgb` here as well, which was a bug
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_4c(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
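# --- Illustrative usage (not part of the original module; run from user code,
# not from inside the `datasets` package, to avoid a circular import) ---
# `Dataset.from_generator` is the public entry point that drives the reader
# class above:
#
#     from datasets import Dataset
#
#     def gen():
#         for i in range(3):
#             yield {"x": i}
#
#     ds = Dataset.from_generator(gen)  # builds via GeneratorDatasetInputStream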
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
def a__ ( self ):
__a = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ ( self ):
with self.assertRaises(lowerCamelCase ):
__a = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def a__ ( self ):
with self.assertRaises(lowerCamelCase ):
__a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def a__ ( self ):
__a = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__a = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def a__ ( self ):
__a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ ( self ):
__a = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def a__ ( self ):
__a = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def a__ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__a = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def a__ ( self ):
__a = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def a__ ( self ):
__a = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def a__ ( self ):
import PIL.Image
__a = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=lowerCamelCase ) as mock_cast_to_python_objects:
__a = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
__a , __a = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , lowerCamelCase )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase( a , a ):
__a = pa.BufferOutputStream()
__a = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowerCamelCase( ):
__a = pa.BufferOutputStream()
__a = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=a , features=a ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
__a = pa.BufferReader(output.getvalue() )
__a = pa.ipc.open_stream(a )
__a = f.read_all()
__a = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(a )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
def _lowerCamelCase( a ):
__a = pa.BufferOutputStream()
with ArrowWriter(
stream=a , writer_batch_size=a , hash_salt="split_name" , check_duplicates=a , ) as writer:
with pytest.raises(a ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
__a , __a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] )
def _lowerCamelCase( a ):
__a = pa.BufferOutputStream()
with ArrowWriter(
stream=a , writer_batch_size=a , hash_salt="split_name" , check_duplicates=a , ) as writer:
with pytest.raises(a ):
writer.write({"col_1": "foo", "col_2": 1} , key=1_0 )
writer.write({"col_1": "bar", "col_2": 2} , key=1_0 )
__a , __a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] )
def _lowerCamelCase( a ):
__a = pa.BufferOutputStream()
with ArrowWriter(
stream=a , writer_batch_size=a , hash_salt="split_name" , check_duplicates=a , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase( a , a ):
__a = pa.BufferOutputStream()
__a = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase( a , a ):
__a = pa.BufferOutputStream()
__a = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase( a , a ):
__a = pa.BufferOutputStream()
__a = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowerCamelCase( ):
with tempfile.TemporaryDirectory() as tmp_dir:
__a = {"col_1": pa.string(), "col_2": pa.intaa()}
__a = os.path.join(a , "test.arrow" )
with ArrowWriter(path=a , schema=pa.schema(a ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(a , 1 )
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase( a , a , a ):
__a = pa.array(TypedSequence(a , optimized_int_type=a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase( a , a , a ):
# in range
__a = pa.array(OptimizedTypedSequence(a , col=a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__a = copy.deepcopy(a )
__a = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(a , a )
__a = pa.array(OptimizedTypedSequence(a , col=a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def _lowerCamelCase( a , a ):
__a = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowerCamelCase( a ):
__a = "mock://dataset-train.arrow"
with ArrowWriter(path=a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(a )
def _lowerCamelCase( ):
__a = pa.BufferOutputStream()
with ParquetWriter(stream=a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__a , __a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__a = pa.BufferReader(output.getvalue() )
__a = pq.read_table(a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def _lowerCamelCase( a , a ):
import PIL.Image
__a = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(a , format="png" )
__a = pa.BufferOutputStream()
with ParquetWriter(
stream=a , features=Features({"image": Image()} ) , embed_local_files=a ) as writer:
writer.write({"image": image_path} )
writer.finalize()
__a = pa.BufferReader(output.getvalue() )
__a = pq.read_table(a )
__a = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , a )
with open(a , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
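# End-to-end sketch (added; not from the original file) of the in-memory ArrowWriter
# API these tests exercise:
#
#   output = pa.BufferOutputStream()
#   with ArrowWriter(stream=output) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       num_examples, num_bytes = writer.finalize()
#   table = pa.ipc.open_stream(output.getvalue()).read_all()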
| 268 | """simple docstring"""
import math
def sieve(n):
    """Segmented sieve: returns the list of all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            # mark every multiple of `each` inside the current segment as composite
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
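# Quick sanity check (added; not part of the original algorithm file):
# the primes below 20 must match the known list.
assert sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]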
| 268 | 1 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
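# Minimal usage sketch for the public API re-exported above (added; the dataset
# name and split are illustrative assumptions, not part of this module):
#
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train")
#   print(ds[0])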
| 70 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
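# Why the slicing above works (added illustration; not part of the original script):
# timm stores query, key and value stacked along dim 0 of a single
# (3 * hidden_size, hidden_size) matrix, so equal thirds recover the projections.
#
#   import torch
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#   assert torch.equal(torch.cat([q, k, v]), qkv)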
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
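# Quick illustrative check of the renaming rules above (added; the example key is an
# assumption about the timm checkpoint layout, not taken from the original script):
#
#   assert rename_key("backbone.blocks.0.attn.proj.weight") == (
#       "vit.encoder.layer.0.attention.output.dense.weight"
#   )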
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
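# Example invocation (added; the script name and paths are illustrative placeholders):
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small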
| 19 | 0 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """
        Simple constructor that receives a key or uses the default key = 0.
        """
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 241 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
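# Illustrative check (added; the example key is an assumption about the OpenAI CLIP
# checkpoint layout): a text-encoder block key maps onto the HF X-CLIP naming.
#
#   assert rename_key("transformer.resblocks.0.ln_1.weight") == (
#       "text_model.encoder.layers.0.layer_norm1.weight"
#   )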
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
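# Note (added; the exact array shape is an assumption based on the fixture names):
# each .npy file holds raw uint8 video frames, and prepare_video returns them as a
# list of per-frame arrays, the format XCLIPProcessor expects for `videos`.
#
#   frames = prepare_video(8)
#   assert len(frames) == 8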
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
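# Example invocation (added; the script name is an illustrative placeholder):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32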
| 241 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
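# Minimal usage sketch (added; `models`, `batch_sizes` and `sequence_lengths` are
# fields assumed to be inherited from BenchmarkArguments, shown for illustration):
#
#   args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
#   )
#   strategy = args.strategy  # resolves to TPU, single-GPU, or CPU automatically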
| 342 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None,
        tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None,
        legacy_behaviour=False, **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
__magic_name__ : List[str] = src_lang
__magic_name__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : str = [self.cur_lang_code]
__magic_name__ : List[Any] = [self.eos_token_id]
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] = [self.cur_lang_code]
__magic_name__ : Union[str, Any] = [self.eos_token_id]
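# Usage sketch (added; not part of the original module):
#
#   tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # To translate, force the target language code as the first generated token:
#   #   model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])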
| 342 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ = (accelerator.state.process_index + 2, 10)
lowerCAmelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
lowerCAmelCase_ = """"""
lowerCAmelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCAmelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCAmelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
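# How to exercise the block above directly (added note; the file path is an
# illustrative placeholder):
#   torchrun --nproc_per_node=2 tests/test_multigpu.py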
| 260 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence` in place using the (deliberately inefficient) slowsort algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
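# Quick demonstration (added; not part of the original file): slowsort sorts in place.
example = [5, 2, 4, 1, 3]
slowsort(example)
assert example == [1, 2, 3, 4, 5]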
| 260 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 12 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples
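

# For illustration only: a self-contained sketch of the exact nearest-neighbor
# lookup `find_nearest_training` performs, i.e. max-inner-product search against
# a flat FAISS index. All sizes and the helper name are made up for the example.
def _faiss_mips_example():
    dim = 128
    corpus = np.random.rand(1000, dim).astype("float32")  # stand-in for question embeddings
    index = faiss.IndexFlatIP(dim)  # exact inner-product search, no compression
    index.add(corpus)
    query = np.random.rand(1, dim).astype("float32")
    scores, ids = index.search(query, 10)  # top-10 most similar rows
    return corpus[ids[0]]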
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
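

# A hedged sketch of what `qa_s2s_generate` does internally: condition the BART
# model on the "question: ... context: ..." string and decode with beam search.
# Illustrative only -- the real helper in eli5_utils also handles batching,
# sampling options and length bookkeeping.
def _generate_answer_sketch(question_doc, model, tokenizer, device="cuda:0"):
    inputs = tokenizer(question_doc, return_tensors="pt", truncation=True, max_length=1024).to(device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, num_beams=2, min_length=64, max_length=256)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)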
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
### Information retriever options

The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options

The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301 | 0 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
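
# A second, illustrative check (not part of the original module): a triangle
# contains an odd cycle, so the same function should return False for it.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False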
| 69 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Optional[int] , **UpperCamelCase: List[str] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = [torch.ones((1, 3, 5, 5) )]
A__ = [[17_64, 26_46]]
A__ = [[6_83, 10_24]]
A__ = processor.post_process_masks(UpperCamelCase , UpperCamelCase , UpperCamelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = processor.post_process_masks(
UpperCamelCase , torch.tensor(UpperCamelCase ) , torch.tensor(UpperCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(UpperCamelCase ):
A__ = processor.post_process_masks(UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) )
@require_vision
@require_tf
class TFSamProcessorTest( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Optional[int] , **UpperCamelCase: str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = [tf.ones((1, 3, 5, 5) )]
A__ = [[17_64, 26_46]]
A__ = [[6_83, 10_24]]
A__ = processor.post_process_masks(UpperCamelCase , UpperCamelCase , UpperCamelCase , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = processor.post_process_masks(
UpperCamelCase , tf.convert_to_tensor(UpperCamelCase ) , tf.convert_to_tensor(UpperCamelCase ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(
UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A__ = processor.post_process_masks(
UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Tuple , **UpperCamelCase: Tuple ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
        A__ = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
A__ = [tf.convert_to_tensor(UpperCamelCase )]
A__ = [torch.tensor(UpperCamelCase )]
A__ = [[17_64, 26_46]]
A__ = [[6_83, 10_24]]
A__ = processor.post_process_masks(
UpperCamelCase , UpperCamelCase , UpperCamelCase , return_tensors="""tf""" )
A__ = processor.post_process_masks(
UpperCamelCase , UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
A__ = processor(images=UpperCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
A__ = image_processor(UpperCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
A__ = processor(images=UpperCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
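

# For intuition, a hedged sketch of the core of `post_process_masks`: low-resolution
# mask logits are upscaled to the padded model input size, the padding is cropped
# away, and the result is resized to the original image size. The real method also
# handles binarization and batching; the helper name and default size are assumptions.
def _upscale_mask_sketch(mask_logits, original_size, reshaped_input_size, longest_edge=1024):
    import torch.nn.functional as F

    masks = F.interpolate(mask_logits, (longest_edge, longest_edge), mode="bilinear", align_corners=False)
    masks = masks[..., : reshaped_input_size[0], : reshaped_input_size[1]]  # crop the padded region
    return F.interpolate(masks, original_size, mode="bilinear", align_corners=False)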
| 69 | 1 |
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]

    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
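
    # Illustrative usage (not part of the original module): for the grid below the
    # cheapest monotone right/down path is 1 -> 3 -> 1 -> 1 -> 1, so we expect 7.
    example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    print(min_path_sum(example_grid))  # 7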
| 43 | import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
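
    # A small illustrative self-check (not in the original script): sorting a random
    # list with quick_sort_random must agree with Python's built-in sorted().
    sample = [random.randint(-100, 100) for _ in range(50)]
    expected = sorted(sample)
    quick_sort_random(sample, 0, len(sample))
    assert sample == expected, "quick_sort_random disagrees with sorted()"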
| 43 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__lowerCAmelCase = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
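

# A hedged aside on the seeding pattern used in the tests above: diffusion outputs
# are only comparable across runs when the initial latents come from the same RNG
# state, which is why each generation reseeds the generator. Minimal sketch:
def _seeded_latents_example(shape=(1, 4, 64, 64)):
    first = torch.randn(shape, generator=torch.manual_seed(0))
    second = torch.randn(shape, generator=torch.manual_seed(0))
    assert torch.equal(first, second)  # identical latents -> comparable images
    return first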
| 352 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )
# convert layers
logger.info('Converting weights...' )
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
__lowerCAmelCase = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
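

# For illustration, a hedged sketch of how to inspect a TF2 checkpoint before
# converting it -- handy for verifying that only embedding/encoder layers are
# present, which is the only case this script supports:
def _inspect_tf_checkpoint(tf_path):
    for name, shape in tf.train.list_variables(tf_path):
        print(f"{name}: {shape}")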
| 288 | 0 |
"""simple docstring"""
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
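

# For illustration, a hedged sanity check of the im2col construction used above:
# applying an all-ones 2x2 "kernel" through the same matrix product must yield
# the plain sum over each sliding window.
def _im2col_sanity_check():
    from numpy import array, ones

    img = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
    k = 2
    rows = zeros(((img.shape[0] - k + 1) * (img.shape[1] - k + 1), k * k))
    row = 0
    for i, j in product(range(img.shape[0] - k + 1), range(img.shape[1] - k + 1)):
        rows[row, :] = ravel(img[i : i + k, j : j + k])
        row += 1
    window_sums = dot(rows, ravel(ones((k, k)))).reshape(2, 2)
    assert (window_sums == array([[12.0, 16.0], [24.0, 28.0]])).all()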
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 60 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
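

# Illustrative usage through the high-level factory; the CLIP checkpoint named
# below is a real public model, while the image path is a placeholder:
def _zero_shot_usage_example():
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier("cat.png", candidate_labels=["cat", "dog", "car"])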
| 196 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
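

# A short, hedged usage sketch (not part of the library module): construct the
# config with small illustrative sizes and read a canonical attribute through
# the attribute_map defined above.
def _gptj_config_example():
    config = GPTJConfig(n_layer=4, n_head=4, n_embd=256)
    assert config.hidden_size == 256  # "hidden_size" resolves to n_embd
    return config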
| 350 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : int = 0
@slow
def __lowerCamelCase ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowercase : Optional[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(SCREAMING_SNAKE_CASE__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowercase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(SCREAMING_SNAKE_CASE__ ) , 0 )
def __lowerCamelCase ( self ):
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __lowerCamelCase ( self ):
lowercase : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Check that tokenizer_type ≠ model_type
lowercase : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' ) )
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''bert''' , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''merges.txt''' ) )
lowercase : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''gpt2''' , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_tokenizers
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' ) )
lowercase : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''bert''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''merges.txt''' ) )
lowercase : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''gpt2''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def __lowerCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowercase : Union[str, Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(tokenizer.do_lower_case , SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def __lowerCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowercase : str = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def __lowerCamelCase ( self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowercase : Any = TOKENIZER_MAPPING.values()
lowercase : Tuple = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(SCREAMING_SNAKE_CASE__ )
@require_tokenizers
def __lowerCamelCase ( self ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , SCREAMING_SNAKE_CASE__ )
@require_tokenizers
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = '''Hello, world. How are you?'''
lowercase : Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def __lowerCamelCase ( self ):
lowercase : int = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def __lowerCamelCase ( self ):
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Check we can load the tokenizer config of an online model.
lowercase : Optional[Any] = get_tokenizer_config('''bert-base-cased''' )
lowercase : str = config.pop('''_commit_hash''' , SCREAMING_SNAKE_CASE__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(SCREAMING_SNAKE_CASE__ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowercase : Union[str, Any] = get_tokenizer_config(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowercase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = get_tokenizer_config(SCREAMING_SNAKE_CASE__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def __lowerCamelCase ( self ):
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
lowercase : int = CustomTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self ):
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
# Can register in two steps
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , fast_tokenizer_class=SCREAMING_SNAKE_CASE__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ , fast_tokenizer_class=SCREAMING_SNAKE_CASE__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , fast_tokenizer_class=SCREAMING_SNAKE_CASE__ )
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
        # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Union[str, Any] = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__ )
bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test the tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test the tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
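
if __name__ == "__main__":
    # Illustrative sketch, not part of the test suite above: the registration API these
    # tests exercise. ``MyConfig`` and ``MyTokenizer`` are hypothetical names made up for
    # this demo; ``AutoConfig.register`` / ``AutoTokenizer.register`` are the real entry
    # points being shown.
    from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyTokenizer(PreTrainedTokenizer):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
    # From here on, checkpoints whose config is a MyConfig resolve to MyTokenizer.
    print(TOKENIZER_MAPPING[MyConfig])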
| 173 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
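

# Usage sketch (illustrative, not part of the module above): popping a deprecated keyword
# argument from a caller's kwargs while warning until the stated removal version is hit.
# ``resize``/``width``/``size`` are made-up names for the demo.
#
#     def resize(image, size=None, **kwargs):
#         width = deprecate("width", "999.0.0", "Pass `size` instead.", take_from=kwargs)
#         if width is not None:
#             size = (width, width)
#         ...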
| 32 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on one shard of the dataset; save results to save_dir/rank_{local_rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append(dict(pred=pred, id=ids[i].item()))
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait for all the rank_*.json shards to appear
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
    # Usage for MT:
    # python run_distributed_eval.py --model_name Helsinki-NLP/opus-mt-en-ro --save_dir $DATA_DIR/gen --data_dir $DATA_DIR
run_generate()
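    # Example multi-GPU launch (illustrative; the dataset path and process count are
    # placeholders, and --local_rank is injected by the launcher itself):
    #   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir tmp_gen --bs 16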
| 226 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--performance_lower_bound={self.performance_lower_bound}',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
F'--resume_from_checkpoint={resume_from_checkpoint}',
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--peak_memory_upper_bound={peak_mem_upper_bound}',
F'--n_train={self.n_train}',
F'--n_val={self.n_val}',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
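

if __name__ == "__main__":
    # Minimal sketch (not one of the tests above) of the pattern the plugin tests rely on:
    # FullyShardedDataParallelPlugin reads its settings from environment variables, so a
    # configuration can be exercised without launching a distributed job. Values are
    # illustrative.
    env = dict(
        ACCELERATE_USE_FSDP="true",
        MASTER_ADDR="localhost",
        MASTER_PORT="10999",
        RANK="0",
        LOCAL_RANK="0",
        WORLD_SIZE="1",
        FSDP_SHARDING_STRATEGY="1",  # 1 == FULL_SHARD
    )
    with mockenv_context(**env):
        print(FullyShardedDataParallelPlugin().sharding_strategy)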
| 370 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses should override this
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count
    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
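
    # Sanity check (illustrative): the only route from vertex 0 to vertex 3 in this graph
    # is 0 -> 1 -> 2 -> 3, so the flow is capped by min(7, 6, 8) = 6.
    assert maximum_flow == 6, f"unexpected flow {maximum_flow}"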
| 236 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowercase_ ) , 1004 )
    def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ : int = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 61 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Find the day of the week for nearly any Gregorian calendar date.

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'
    """
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
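    # Example run (illustrative; the script filename is a placeholder). January 1st 2010
    # fell on a Friday, so:
    #   $ python zellers_congruence.py 01-01-2010
    #   Your date 01-01-2010, is a Friday!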
| 326 | 0 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """
    Power iteration: find the largest eigenvalue (and a corresponding eigenvector) of
    `input_matrix`, starting from the initial guess `vector`.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
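
    # Extra illustrative check: for a diagonal matrix the dominant eigenvalue is simply
    # the largest diagonal entry, so power iteration should return 2 here.
    eigen_value, _ = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
    assert abs(eigen_value - 2.0) <= 1e-6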
| 273 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 273 | 1 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    # If DATASETS_VERBOSITY is set to a valid choice, return that as the default level.
    env_level_str = os.environ.get("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
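

if __name__ == "__main__":
    # Illustrative usage of the helpers above (the module is normally imported, not run):
    logger = get_logger(__name__)
    set_verbosity_info()
    logger.info("verbosity is now INFO")
    disable_progress_bar()
    assert not is_progress_bar_enabled()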
| 25 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self) -> float:
return 1E-4
@property
    def default_onnx_opset(self) -> int:
return 12
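

if __name__ == "__main__":
    # Illustrative usage (runs only inside the transformers package because of the
    # relative imports above); the values printed are the defaults defined in this file.
    config = YolosConfig()
    onnx_config = YolosOnnxConfig(config)
    print(config.model_type, config.hidden_size, dict(onnx_config.inputs))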
| 25 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "xlm"
lowercase_ = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
    def __init__( self , vocab_size=30_145 , emb_dim=2_048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2_048**-0.5 , layer_norm_eps=1E-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
            ] )
| 370 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ) -> str:
# vision encoder
if "img_encoder.pos_embed" in name:
SCREAMING_SNAKE_CASE_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
SCREAMING_SNAKE_CASE_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
SCREAMING_SNAKE_CASE_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
SCREAMING_SNAKE_CASE_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
SCREAMING_SNAKE_CASE_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
SCREAMING_SNAKE_CASE_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
SCREAMING_SNAKE_CASE_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
SCREAMING_SNAKE_CASE_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
SCREAMING_SNAKE_CASE_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
SCREAMING_SNAKE_CASE_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
SCREAMING_SNAKE_CASE_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
SCREAMING_SNAKE_CASE_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
SCREAMING_SNAKE_CASE_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
SCREAMING_SNAKE_CASE_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
SCREAMING_SNAKE_CASE_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
SCREAMING_SNAKE_CASE_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
SCREAMING_SNAKE_CASE_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
SCREAMING_SNAKE_CASE_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
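# Worked example of the renaming above (derived from the rules themselves):
#   "img_encoder.layers.0.blocks.1.norm1.weight"
#       -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"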
def convert_state_dict( orig_state_dict , config ):
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE_ = val[:dim, :]
SCREAMING_SNAKE_CASE_ = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_ = val[:dim]
SCREAMING_SNAKE_CASE_ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE_ = val[:dim, :]
SCREAMING_SNAKE_CASE_ = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE_ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_ = val[:dim]
SCREAMING_SNAKE_CASE_ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ = val[-dim:]
else:
        new_name = rename_key(key )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
            orig_state_dict[new_name] = val.squeeze_()
        else:
            orig_state_dict[new_name] = val
return orig_state_dict
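# Note on the qkv/in_proj branches above: the fused projection matrix is laid
# out as [query; key; value] along dim 0, which is why the code takes three
# dim-sized slices (val[:dim], val[dim : dim * 2], val[-dim:]).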
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
# verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
    else:
        raise ValueError(f"Model name {model_name} not supported." )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 210 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict( self ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp( self ) -> Tuple:
        '''simple docstring'''
        self.image_processor_tester = GLPNImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Tuple:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size_divisor""" ) )
        self.assertTrue(hasattr(image_processing , """resample""" ) )
        self.assertTrue(hasattr(image_processing , """do_rescale""" ) )
    def test_batch_feature( self ) -> Any:
'''simple docstring'''
pass
    def test_call_pil( self ) -> List[Any]:
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy( self ) -> Dict:
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch( self ) -> int:
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 254 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool("""extra_id_""" in str(x ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ) -> int:
'''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __UpperCAmelCase , )
return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ) -> Any:
'''simple docstring'''
return list(
set(filter(lambda __UpperCAmelCase : bool(re.search(r"""<extra_id_\d+>""" , __UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
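    # With the default extra_ids=100, get_sentinel_tokens() returns the sentinel
    # strings "<extra_id_0>" ... "<extra_id_99>" registered in __init__ above.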
    def get_sentinel_token_ids( self ) -> Optional[int]:
'''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
| 254 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class MarianTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id( self ):
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<pad>''' )
        self.assertEqual(len(vocab_keys ) , 9 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def test_tokenizer_equivalence_en_de( self ):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
        batch = en_de_tokenizer(['''I am a small frog'''] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob('''*''' )]
        self.assertIn('''source.spm''' , contents )
        MarianTokenizer.from_pretrained(save_dir )
    def test_outputs_not_longer_than_maxlen( self ):
        tok = self.get_tokenizer()
        batch = tok(
            ['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
    def test_outputs_can_be_shorter( self ):
        tok = self.get_tokenizer()
        batch_smaller = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
    def test_tokenizer_integration( self ):
lowercase : Optional[int] = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
    def test_tokenizer_integration_seperate_vocabs( self ):
        tokenizer = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
        source_text = '''Tämä on testi'''
        target_text = '''This is a test'''
        expected_src_tokens = [76, 7, 2047, 2]
        expected_target_tokens = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(expected_src_tokens , src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(expected_target_tokens , target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
| 369 |
def solution( length = 50 ) -> int:
    """Count, summed over the three colours, the tilings of a row of `length`
    units that use at least one coloured tile of length 2, 3 or 4."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
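# A quick sanity check, assuming this implements Project Euler 116 (which the
# recurrence above suggests): for a row of length 5 there are 7 red, 3 green
# and 2 blue tilings, so solution(5) should return 12.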
if __name__ == "__main__":
print(F'''{solution() = }''')
| 173 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowerCamelCase : int = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCamelCase__ : str = " " ):
"""simple docstring"""
UpperCamelCase = sentence_delimiter
        def process_string( self , s ):
            """simple docstring"""
            return list(s )
        def process_list( self , inp ):
            """simple docstring"""
            chars = []
            for sent_idx, sentence in enumerate(inp ):
                chars.extend(self.process_string(sentence ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp ) - 1:
chars.append(self.sentence_delimiter )
return chars
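        # e.g. SentencesToListOfCharacters(" ").process_list(["ab", "cd"]) yields
        # ["a", "b", " ", "c", "d"]; with this module's empty SENTENCE_DELIMITER
        # no separator character is inserted between sentences.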
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_lowerCamelCase : str = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_lowerCamelCase : List[Any] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_lowerCamelCase : Tuple = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute( self , predictions , references , concatenate_texts=False ):
        """simple docstring"""
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 28 |
'''simple docstring'''
import math
class Graph :
"""simple docstring"""
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall( self ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : List[str] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 28 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ):
    """simple docstring"""
    qts = tuple((re.compile(x + "$" ) for x in qs) )
    for i in range(len(ks ) - len(qts ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
return True
return False
def _replacement_rules( rules ):
    """simple docstring"""
    def replace( key , val ):
for rule, replacement in rules:
            if _match(rule , key ):
return replacement
return val
return replace
def _get_partition_rules( ):
"""simple docstring"""
return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp" , None )),
        (("transformer", "wte", "embedding"), P("mp" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , "mp" )),
        (("attention", "out_proj", "kernel"), P("mp" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp" , None )),
        (("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions( in_dict ):
    """simple docstring"""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
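# Usage sketch (illustrative; assumes `params` is the flattenable parameter
# pytree of the GPT-style Flax model these regex rules were written for):
#     param_spec = set_partitions(model.params)
# Each leaf of `param_spec` is then a PartitionSpec (or None for replicated
# tensors) that can be handed to pjit-style sharding utilities.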
| 359 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
return inputs
    def test_attention_slicing_forward_pass( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_alt_diffusion_ddim( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_pndm( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion( self ):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_fast_ddim( self ):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="numpy" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 60 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """unispeech-sat"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , num_clusters=504 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
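    # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above
    # evaluates to 5 * 2**6 = 320: one logit frame per 320 input samples.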
| 23 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCamelCase : Optional[Any] = TypeVar('T')
class GraphAdjacencyList( Generic[T] ):
    def __init__( self , directed = True ):
        self.adj_list = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ):
return pformat(self.adj_list )
| 220 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SpeechaTextaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self, vocab_size=1_0000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs, ):
lowerCamelCase__ : Tuple =vocab_size
lowerCamelCase__ : Optional[Any] =d_model
lowerCamelCase__ : Dict =decoder_ffn_dim
lowerCamelCase__ : List[str] =decoder_layers
lowerCamelCase__ : int =decoder_attention_heads
lowerCamelCase__ : List[Any] =dropout
lowerCamelCase__ : List[str] =attention_dropout
lowerCamelCase__ : int =activation_dropout
lowerCamelCase__ : Any =activation_function
lowerCamelCase__ : Optional[int] =init_std
lowerCamelCase__ : Tuple =decoder_layerdrop
lowerCamelCase__ : Optional[Any] =use_cache
lowerCamelCase__ : str =decoder_layers
lowerCamelCase__ : str =scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ : Optional[int] =max_target_positions
super().__init__(
pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, decoder_start_token_id=lowerCamelCase, **lowerCamelCase, )
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
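This is the standard transformers lazy-import pattern: sys.modules[__name__] is replaced by a _LazyModule, so each submodule is imported only when one of its names is first accessed. A usage sketch, assuming this file is transformers/models/trocr/__init__.py:

    # Cheap at import time; nothing listed in _import_structure is loaded yet.
    from transformers.models.trocr import TrOCRConfig, TrOCRProcessor
    # Touching a modeling class is what finally triggers the torch-dependent
    # modeling_trocr import (and raises if torch is unavailable).
    from transformers.models.trocr import TrOCRForCausalLM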
| 272 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two collections: |intersection| / |union|.

    With alternative_union=True the denominator is len(set_a) + len(set_b)
    instead of the size of the true union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        # Build the union as a list so duplicates in set_a are preserved.
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)
    return None
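# Worked check (added note, not part of the original script): with
# set_a = {"a", "b", "c", "d", "e"} and set_b = {"c", "d", "e", "f", "h", "i"},
# the intersection is {"c", "d", "e"} (3 elements) and the union has 8 elements,
# so jaccard_similarity(set_a, set_b) returns 3 / 8 = 0.375. With
# alternative_union=True the denominator is len(set_a) + len(set_b) = 11,
# giving 3 / 11 ~= 0.273 instead.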
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))  # 0.375
| 232 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Forward the inputs to TvltImageProcessor and/or TvltFeatureExtractor and merge the outputs."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
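A usage sketch for the processor above. The checkpoint name and array shapes are assumptions for illustration ("ZinengTang/tvlt-base" is the TVLT checkpoint commonly used in the transformers docs; real inputs would be decoded video frames and waveforms):

    import numpy as np

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    video = [np.random.rand(8, 3, 224, 224)]  # one clip of 8 RGB frames
    audio = [np.random.rand(16_000)]          # one second of audio at 16 kHz
    inputs = processor(images=video, audio=audio, sampling_rate=16_000)
    print(inputs.keys())  # merged image and audio model inputs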
| 317 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
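# Usage note (added): this looks like TheAlgorithms' filename-hygiene script
# (scripts/validate_filenames.py there; the path is an assumption). Run it from
# the repository root:
#     python scripts/validate_filenames.py
# It prints each offending path and exits with the offender count, so any
# non-zero exit fails a CI job.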
| 319 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load the original checkpoint's state dict onto CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename the keys of the original state dict to the transformers naming scheme."""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original VisualBERT checkpoint weights into the transformers design."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = "pretraining"
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512}
SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048}
SCREAMING_SNAKE_CASE : Any = "vqa_advanced"
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129}
SCREAMING_SNAKE_CASE : Tuple = "vqa"
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr"
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase )
# Load State Dict
SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
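# A usage sketch (the script and output names are illustrative):
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         vqa_fine_tuned.th ./visual_bert_vqa
# "vqa_fine_tuned.th" matches the "vqa" branch above, so the weights are mapped
# onto VisualBertForQuestionAnswering (visual_embedding_dim=2048, num_labels=3129)
# and saved in HF format under ./visual_bert_vqa.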
| 319 | 1 |