"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeq2SeqLM,
"translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowerCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self , _a , _a=None , _a="base" , _a=None , _a=None , _a=None , **_a , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_a )
__a = 0
__a = Path(self.hparams.output_dir )
__a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=_a , **_a , )
else:
__a = config
__a = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , _a , _a ):
assert hasattr(self.config , _a ), f'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , _a , getattr(self.hparams , _a ) )
if tokenizer is None:
__a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_a , )
else:
__a = tokenizer
__a = MODEL_MODES[mode]
if model is None:
__a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_a , )
else:
__a = model
def __UpperCAmelCase ( self , *_a , **_a ):
__a = self.model_type.from_pretrained(*_a , **_a )
def __UpperCAmelCase ( self ):
__a = arg_to_scheduler[self.hparams.lr_scheduler]
__a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__a = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __UpperCAmelCase ( self ):
__a = self.model
__a = ['''bias''', '''LayerNorm.weight''']
__a = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
__a = Adafactor(
_a , lr=self.hparams.learning_rate , scale_parameter=_a , relative_step=_a )
else:
__a = AdamW(
_a , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__a = optimizer
__a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __UpperCAmelCase ( self , _a , _a ):
return self.validation_step(_a , _a )
def __UpperCAmelCase ( self , _a ):
return self.validation_end(_a )
def __UpperCAmelCase ( self ):
__a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __UpperCAmelCase ( self , _a ):
if stage == "test":
__a = len(self.test_dataloader().dataset )
else:
__a = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=_a )
__a = len(self.train_dataloader().dataset )
def __UpperCAmelCase ( self , _a , _a , _a = False ):
raise NotImplementedError('''You must implement this for your task''' )
def __UpperCAmelCase ( self ):
return self.train_loader
def __UpperCAmelCase ( self ):
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=_a )
def __UpperCAmelCase ( self ):
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=_a )
def __UpperCAmelCase ( self , _a ):
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
_a , list(filter(_a , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __UpperCAmelCase ( self , _a ):
__a = self.output_dir.joinpath('''best_tfmr''' )
__a = self.step_count
self.model.save_pretrained(_a )
self.tokenizer.save_pretrained(_a )
@staticmethod
def __UpperCAmelCase ( _a , _a ):
parser.add_argument(
'''--model_name_or_path''' , default=_a , type=_a , required=_a , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=_a , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=_a , type=_a , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''cache''' ) , type=_a , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=_a , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=_a , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=_a , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=_a , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=_a , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=_a , metavar=_a , type=_a , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=_a , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_a , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=_a , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=_a , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=_a )
parser.add_argument('''--train_batch_size''' , default=32 , type=_a )
parser.add_argument('''--eval_batch_size''' , default=32 , type=_a )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a ):
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a ):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_a )
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a ):
__a = trainer.lr_schedulers[0]['''scheduler''']
__a = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_a )
def __UpperCAmelCase ( self , _a , _a ):
rank_zero_info('''***** Validation results *****''' )
__a = trainer.callback_metrics
# Log results
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_a , str(metrics[key] ) ) )
def __UpperCAmelCase ( self , _a , _a ):
rank_zero_info('''***** Test results *****''' )
__a = trainer.callback_metrics
# Log and save results to file
__a = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(_a , '''w''' ) as writer:
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_a , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(_a , str(metrics[key] ) ) )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'''--output_dir''' , default=str(Path(lowerCAmelCase__ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCAmelCase__ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase__ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase__ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=lowerCAmelCase__ , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(lowerCAmelCase__ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase__ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def lowercase ( lowerCAmelCase__ : BaseTransformer , lowerCAmelCase__ : argparse.Namespace , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : int=[] , lowerCAmelCase__ : str=None , lowerCAmelCase__ : List[str]=None , **lowerCAmelCase__ : str , ) -> Optional[int]:
pl.seed_everything(args.seed )
# init model
odir = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=True )
# add custom checkpoints
if checkpoint_callback is None:
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(early_stopping_callback )
if logging_callback is None:
logging_callback = LoggingCallback()
train_params = {}
if args.fp16:
train_params["precision"] = 16
if args.gpus > 1:
train_params["accelerator"] = "auto"
train_params["strategy"] = "ddp"
train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
train_params["profiler"] = None
train_params["devices"] = "auto"
trainer = pl.Trainer.from_argparse_args(
args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
if args.do_train:
trainer.fit(model )
else:
print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
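# Hypothetical driver sketch (illustration only, not part of this module). In the upstream
# transformers research examples the two module-level helpers above are exposed as
# `add_generic_args` and `generic_train`; a task script wires them together roughly so:
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())                       # the first helper above
#   parser = BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskModule(args)   # hypothetical subclass implementing get_dataloader()
#   trainer = generic_train(model, args)                        # the second helper above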
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
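# Minimal usage sketch (assumes this class is exported as `RwkvConfig`, as in transformers):
#
#   from transformers import RwkvConfig, RwkvModel
#   config = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=768, num_hidden_layers=12)
#   model = RwkvModel(config)   # randomly initialized model built from this configuration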
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCAmelCase : List[str] = 'CIDAS/clipseg-rd64-refined'
__UpperCAmelCase : Optional[Any] = 'image_segmenter'
__UpperCAmelCase : str = CLIPSegForImageSegmentation
__UpperCAmelCase : Any = ['image', 'text']
__UpperCAmelCase : Union[str, Any] = ['image']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''vision'''] )
super().__init__(*_a , **_a )
def __UpperCAmelCase ( self , _a , _a ):
return self.pre_processor(text=[label] , images=[image] , padding=_a , return_tensors='''pt''' )
def __UpperCAmelCase ( self , _a ):
with torch.no_grad():
__a = self.model(**_a ).logits
return logits
def __UpperCAmelCase ( self , _a ):
array = outputs.cpu().detach().numpy()
array[array <= 0] = 0
array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uint8) )
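# Hypothetical usage sketch (the task name passed to `load_tool` and the exact call signature
# are assumptions; the class above follows the transformers agents/tools API):
#
#   from transformers import load_tool
#   segmenter = load_tool("image-segmentation")
#   mask = segmenter(pil_image, "cat")   # original image + label in, binary PIL mask out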
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowercase ( ) -> Dict:
__a = HfArgumentParser(lowerCAmelCase__ )
__a = parser.parse_args_into_dataclasses()[0]
__a = TensorFlowBenchmark(args=lowerCAmelCase__ )
try:
__a = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__a = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
__a = ''' '''.join(str(lowerCAmelCase__ ).split(''' ''' )[:-1] )
__a = ''''''
__a = eval(str(lowerCAmelCase__ ).split(''' ''' )[-1] )
__a = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
__a = full_error_msg + begin_error_msg + str(lowerCAmelCase__ )
raise ValueError(lowerCAmelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
w , h = image.size
w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
image = np.array(image ).astype(np.float32 ) / 255.0
image = image[None].transpose(0 , 3 , 1 , 2 )
image = torch.from_numpy(image )
return 2.0 * image - 1.0
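# Example of the preprocessing above: a 517x389 RGB PIL image is resized down to the nearest
# multiples of 32 (512x384), so the returned tensor has shape (1, 3, 384, 512) in [-1.0, 1.0].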
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
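# Hypothetical usage sketch (in diffusers this pipeline is published as LDMSuperResolutionPipeline):
#
#   from diffusers import LDMSuperResolutionPipeline
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]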
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
| 11 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
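# Note on the _LazyModule pattern above: importing the package only registers the import
# structure; the torch-dependent modeling classes are loaded on first attribute access, e.g.
# `from transformers.models.mega import MegaModel` triggers the real import at that point.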
"""simple docstring"""
from math import factorial, radians
def sin ( angle_in_degrees : float , accuracy : int = 18 , rounded_values_count : int = 10 ) -> float:
angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
angle_in_radians = radians(angle_in_degrees )
result = angle_in_radians
a = 3
b = -1
for _ in range(accuracy ):
result += (b * (angle_in_radians**a)) / factorial(a )
b = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(result , rounded_values_count )
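# Illustrative sanity check (not part of the original file): with the default 18 terms and
# 10-decimal rounding, sin(30.0) returns 0.5 and sin(90.0) returns 1.0.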
if __name__ == "__main__":
__import__("doctest").testmod()
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'dpt'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=384 , _a=16 , _a=3 , _a=False , _a=True , _a=[2, 5, 8, 11] , _a="project" , _a=[4, 2, 1, 0.5] , _a=[96, 192, 384, 768] , _a=256 , _a=-1 , _a=False , _a=True , _a=0.4 , _a=255 , _a=0.1 , _a=[1, 1_024, 24, 24] , _a=[0, 1] , _a=None , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
__a = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
__a = BitConfig(**_a )
elif isinstance(_a , _a ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
__a = BitConfig(**_a )
elif isinstance(_a , _a ):
__a = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
__a = backbone_featmap_shape
__a = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
__a = None
__a = None
__a = []
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
__a = readout_type
__a = reassemble_factors
__a = neck_hidden_sizes
__a = fusion_hidden_size
__a = head_in_index
__a = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
__a = use_auxiliary_head
__a = auxiliary_loss_weight
__a = semantic_loss_ignore_index
__a = semantic_classifier_dropout
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling ( data : dict ) -> tuple:
return (data["data"], data["target"])
def xgboost ( features : np.ndarray , target : np.ndarray , test_features : np.ndarray ) -> np.ndarray:
xgb = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(features , target )
# Predict target for test data
predictions = xgb.predict(test_features )
predictions = predictions.reshape(len(predictions ) , 1 )
return predictions
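# Illustrative note (assumed shapes): the California housing data used below has 8 feature
# columns, so an (n_samples, 8) test matrix yields an (n_samples, 1) array of predictions.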
def lowercase ( ) -> None:
__a = fetch_california_housing()
__a , __a = data_handling(lowerCAmelCase__ )
__a , __a , __a , __a = train_test_split(
lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 )
__a = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 1 |
"""simple docstring"""
from math import factorial
def binomial_distribution ( successes : int , trials : int , prob : float ) -> float:
if successes > trials:
raise ValueError('''successes must be lower or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(successes , int ) or not isinstance(trials , int ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be in range of 1 - 0''' )
probability = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
coefficient = float(factorial(trials ) )
coefficient /= factorial(successes ) * factorial(trials - successes )
return probability * coefficient
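# Worked example: binomial_distribution(2, 4, 0.75)
#   = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375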
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BeitFeatureExtractor"]
lowercase_ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
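# Example: the default block_sizes=[4, 4, 4] with block_repeats=[1, 1, 1] give num_blocks == 3
# and num_hidden_layers == 12 encoder layers, plus num_decoder_layers == 2 decoder layers.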
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = XGLMTokenizer
__UpperCAmelCase : Any = XGLMTokenizerFast
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : str = True
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = XGLMTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self ):
__a = '''<pad>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(_a ) , 1_008 )
def __UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def __UpperCAmelCase ( self ):
__a = XGLMTokenizer(_a , keep_accents=_a )
__a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __UpperCAmelCase ( self ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __UpperCAmelCase ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_a , f.name )
__a = XGLMTokenizer(f.name , keep_accents=_a )
__a = pickle.dumps(_a )
pickle.loads(_a )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = '''I was born in 92000, and this is falsé.'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
@slow
def __UpperCAmelCase ( self ):
__a = '''Hello World!'''
__a = [2, 31_227, 4_447, 35]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __UpperCAmelCase ( self ):
__a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__a = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __UpperCAmelCase ( self ):
# fmt: off
__a = {
'''input_ids''': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''facebook/xglm-564M''' , padding=_a , )
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> str:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'trocr'
__UpperCAmelCase : Union[str, Any] = ['past_key_values']
__UpperCAmelCase : int = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , _a=50_265 , _a=1_024 , _a=12 , _a=16 , _a=4_096 , _a="gelu" , _a=512 , _a=0.1 , _a=0.0 , _a=0.0 , _a=2 , _a=0.02 , _a=0.0 , _a=True , _a=False , _a=True , _a=True , _a=1 , _a=0 , _a=2 , **_a , ):
__a = vocab_size
__a = d_model
__a = decoder_layers
__a = decoder_attention_heads
__a = decoder_ffn_dim
__a = activation_function
__a = max_position_embeddings
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = init_std
__a = decoder_layerdrop
__a = use_cache
__a = scale_embedding
__a = use_learned_position_embeddings
__a = layernorm_embedding
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , **_a , )
| 11 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
''' to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
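# Usage sketch (illustrative assumption, not part of the original file): this pipeline first runs the CLIPSeg
# segmentation model on (image, text) to build a mask, then hands that mask to a plain
# StableDiffusionInpaintPipeline. With public checkpoints it could be wired up roughly like this:
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   segmentation_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   # the remaining components (vae, text_encoder, tokenizer, unet, scheduler, safety_checker,
#   # feature_extractor) can be loaded from an inpainting checkpoint such as
#   # "runwayml/stable-diffusion-inpainting" before constructing the pipeline.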
| 11 | 1 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase_ = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any=None ) -> Any:
require_version(deps[pkg] , lowerCAmelCase__ )
| 11 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -_a[0] )
]
return result
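# Usage sketch (illustrative; assumes this class is registered under the usual
# "zero-shot-audio-classification" pipeline task, and the model id below is an assumption, not taken
# from this file):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#   # -> a list of {"score": ..., "label": ...} dicts sorted by descending score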
| 11 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowercase_ = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a=False , _a=False , _a=6.0 , _a=None , _a=False , _a=False , _a=None , _a="fp4" , _a=False , **_a , ):
__a = load_in_abit
__a = load_in_abit
__a = llm_inta_threshold
__a = llm_inta_skip_modules
__a = llm_inta_enable_fpaa_cpu_offload
__a = llm_inta_has_fpaa_weight
__a = bnb_abit_quant_type
__a = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
__a = torch.floataa
elif isinstance(_a , _a ):
__a = getattr(_a , _a )
elif isinstance(_a , torch.dtype ):
__a = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def __UpperCAmelCase ( self ):
if not isinstance(self.llm_inta_threshold , _a ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _a ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _a ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , _a ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , _a ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , _a ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def __UpperCAmelCase ( self ):
return self.load_in_abit or self.load_in_abit
def __UpperCAmelCase ( self ):
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def __UpperCAmelCase ( cls , _a , _a , **_a ):
__a = cls(**_a )
__a = []
for key, value in kwargs.items():
if hasattr(_a , _a ):
setattr(_a , _a , _a )
to_remove.append(_a )
for key in to_remove:
kwargs.pop(_a , _a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def __UpperCAmelCase ( self , _a ):
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
__a = self.to_dict()
__a = json.dumps(_a , indent=2 , sort_keys=_a ) + '''\n'''
writer.write(_a )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self ):
return f'''{self.__class__.__name__} {self.to_json_string()}'''
def __UpperCAmelCase ( self , _a = True ):
if use_diff is True:
__a = self.to_diff_dict()
else:
__a = self.to_dict()
return json.dumps(_a , indent=2 , sort_keys=_a ) + "\n"
def __UpperCAmelCase ( self ):
__a = self.to_dict()
# get the default config dict
__a = BitsAndBytesConfig().to_dict()
__a = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
__a = value
return serializable_config_dict
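# Usage sketch (illustrative; the keyword names follow the public transformers.BitsAndBytesConfig API,
# which this copy mirrors with mangled attribute names):
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#   quant_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_use_double_quant=True,
#       bnb_4bit_compute_dtype=torch.bfloat16,
#   )
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)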
| 11 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
| 11 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowercase ( lowerCAmelCase__ : Callable , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> np.ndarray:
__a = int(np.ceil((x_end - xa) / step_size ) )
__a = np.zeros((n + 1,) )
__a = ya
__a = xa
for k in range(lowerCAmelCase__ ):
__a = y[k] + step_size * ode_func(lowerCAmelCase__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
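# Worked example (added comment, not part of the original module): the routine above implements the
# forward Euler update y[k + 1] = y[k] + step_size * ode_func(x[k], y[k]). With ode_func = lambda x, y: y,
# y0 = 1.0, x0 = 0.0, step_size = 0.001 and x_end = 1.0, the returned array ends at roughly 2.7169,
# approaching e ≈ 2.71828 as the step size shrinks.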
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a = 0 ):
__a , __a = row, column
__a = [[default_value for c in range(_a )] for r in range(_a )]
def __str__( self ):
__a = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__a = 0
for row_vector in self.array:
for obj in row_vector:
__a = max(_a , len(str(_a ) ) )
__a = f'''%{max_element_length}s'''
# Make string and return
def single_line(_a ) -> str:
nonlocal string_format_identifier
__a = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_a ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def __UpperCAmelCase ( self , _a ):
if not (isinstance(_a , (list, tuple) ) and len(_a ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , _a ):
assert self.validate_indicies(_a )
return self.array[loc[0]][loc[1]]
def __setitem__( self , _a , _a ):
assert self.validate_indicies(_a )
__a = value
def __add__( self , _a ):
assert isinstance(_a , _a )
assert self.row == another.row and self.column == another.column
# Add
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] + another[r, c]
return result
def __neg__( self ):
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = -self[r, c]
return result
def __sub__( self , _a ):
return self + (-another)
def __mul__( self , _a ):
if isinstance(_a , (int, float) ): # Scalar multiplication
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] * another
return result
elif isinstance(_a , _a ): # Matrix multiplication
assert self.column == another.row
__a = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__a = f'''Unsupported type given for another ({type(_a )})'''
raise TypeError(_a )
def __UpperCAmelCase ( self ):
__a = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c]
return result
def __UpperCAmelCase ( self , _a , _a ):
assert isinstance(_a , _a ) and isinstance(_a , _a )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
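# Sherman-Morrison formula (descriptive note): treating `self` as A^(-1), the code below returns
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
# where 1 + v^T A^(-1) u is the `numerator_factor` computed just below.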
__a = v.transpose()
__a = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowercase ( ) -> None:
# a^(-1)
__a = Matrix(3 , 3 , 0 )
for i in range(3 ):
__a = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 1, 2, -3
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
def lowercase ( ) -> None:
import doctest
doctest.testmod()
testa()
| 11 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
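# (descriptive note) after these three calls the accumulated gradient is the element-wise sum
# [1.0 - 2.0 - 1.0, 2.0 + 1.0 + 2.0] = [-2.0, 5.0], which is what the assertion a few lines below checks.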
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 11 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
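# (descriptive note) for the source language MBART uses no prefix: sequences are laid out as
# `X [eos, src_lang_code]`, i.e. the empty prefix list and the `[eos, lang_code]` suffix set just above.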
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 11 | 1 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -_a[0] )
]
return result
| 11 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_attention_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_choices
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = True
__UpperCAmelCase : str = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self ):
__a = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self ):
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_a )
__a = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__a = jnp.array([[0, 1, 2, 3, 4, 5]] )
__a = model(_a )[0]
__a = 50_000
__a = (1, 6, vocab_size)
self.assertEqual(output.shape , _a )
__a = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 11 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None:
__a = len(lowerCAmelCase__ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowerCAmelCase__ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
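# Worked illustration (added note): queens at (row=1, col=3) and (row=3, col=1) share row + col = 4,
# so they attack each other along a 135º diagonal; queens at (1, 1) and (3, 3) share row - col = 0,
# so they collide along a 45º diagonal. Storing these sums and differences for every placed queen is
# what makes the constant-time collision check below possible.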
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : int ) -> None:
__a = []
depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowerCAmelCase__ )
print('''''' )
print(len(lowerCAmelCase__ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 11 | 1 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[Dict[int, int]] = None , lowerCAmelCase__ : bool = False , ) -> Union[str, Any]:
if label_map is not None:
for old_id, new_id in label_map.items():
__a = new_id
# turn into Numpy arrays
__a = np.array(lowerCAmelCase__ )
__a = np.array(lowerCAmelCase__ )
if reduce_labels:
__a = 255
__a = label - 1
__a = 255
__a = label != ignore_index
__a = np.not_equal(lowerCAmelCase__ , lowerCAmelCase__ )
__a = pred_label[mask]
__a = np.array(lowerCAmelCase__ )[mask]
__a = pred_label[pred_label == label]
__a = np.histogram(lowerCAmelCase__ , bins=lowerCAmelCase__ , range=(0, num_labels - 1) )[0]
__a = np.histogram(lowerCAmelCase__ , bins=lowerCAmelCase__ , range=(0, num_labels - 1) )[0]
__a = np.histogram(lowerCAmelCase__ , bins=lowerCAmelCase__ , range=(0, num_labels - 1) )[0]
__a = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[Dict[int, int]] = None , lowerCAmelCase__ : bool = False , ) -> Dict:
__a = np.zeros((num_labels,) , dtype=np.floataa )
__a = np.zeros((num_labels,) , dtype=np.floataa )
__a = np.zeros((num_labels,) , dtype=np.floataa )
__a = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__a , __a , __a , __a = intersect_and_union(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Dict[int, int]] = None , lowerCAmelCase__ : bool = False , ) -> List[str]:
__a , __a , __a , __a = total_intersect_and_union(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# compute metrics
__a = {}
__a = total_area_intersect.sum() / total_area_label.sum()
__a = total_area_intersect / total_area_union
__a = total_area_intersect / total_area_label
__a = np.nanmean(lowerCAmelCase__ )
__a = np.nanmean(lowerCAmelCase__ )
__a = all_acc
__a = iou
__a = acc
if nan_to_num is not None:
__a = {metric: np.nan_to_num(lowerCAmelCase__ , nan=lowerCAmelCase__ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a = None , _a = None , _a = False , ):
__a = mean_iou(
results=_a , gt_seg_maps=_a , num_labels=_a , ignore_index=_a , nan_to_num=_a , label_map=_a , reduce_labels=_a , )
return iou_result
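# Added sketch (not part of the metric module; names and values are illustrative): the
# per-class IoU described in the docstring, computed with plain numpy on two tiny maps.
import numpy as np

pred_map = np.array([[1, 2], [3, 4]])
gt_map = np.array([[1, 3], [3, 4]])
num_labels = 5

area_intersect = np.histogram(pred_map[pred_map == gt_map], bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred_map, bins=num_labels, range=(0, num_labels - 1))[0]
area_gt = np.histogram(gt_map, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred + area_gt - area_intersect
print(area_intersect / np.maximum(area_union, 1))  # per-class IoU; classes absent from both maps stay 0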
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
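# Added note (hedged; assumes this file is packaged as
# transformers/models/vision_text_dual_encoder/__init__.py): thanks to _LazyModule, importing
# the config pulls in no deep-learning backend, while the model classes are resolved lazily
# and only if the matching is_torch/is_flax/is_tf check above succeeds.
from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderConfig  # cheap import
# from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderModel  # would trigger the torch branch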
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowercase_ = logging.getLogger(__name__)
lowercase_ = "pytorch_model.bin"
@dataclasses.dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCAmelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The name of the task to train on.'} , )
__UpperCAmelCase : Optional[List[str]] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCAmelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
__UpperCAmelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Random seed for initialization.'} , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a = dataset.filter(lambda lowerCAmelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a = int(eval_result * len(lowerCAmelCase__ ) )
print(lowerCAmelCase__ )
__a = dataset.sort('''probability''' , reverse=lowerCAmelCase__ )
__a = dataset.select(range(lowerCAmelCase__ ) )
__a = dataset.remove_columns(['''label''', '''probability'''] )
__a = dataset.rename_column('''prediction''' , '''label''' )
__a = dataset.map(lambda lowerCAmelCase__ : {"label": idalabel[example["label"]]} )
__a = dataset.shuffle(seed=args.seed )
__a = os.path.join(lowerCAmelCase__ , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(lowerCAmelCase__ , index=lowerCAmelCase__ )
else:
dataset.to_json(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[Any] ) -> str:
__a = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a = STModelArguments(model_name_or_path=lowerCAmelCase__ )
__a = STDataArguments(train_file=lowerCAmelCase__ , infer_file=lowerCAmelCase__ )
__a = STTrainingArguments(output_dir=lowerCAmelCase__ )
__a = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowerCAmelCase__ ).items():
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for key, value in kwargs.items():
if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Sanity checks
__a = {}
__a = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a = args.train_file
__a = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a = args.eval_file
for key in data_files:
__a = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
__a = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
__a = f'''{args.output_dir}/self-train_iter-{{}}'''.format
__a = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
accelerator.wait_for_everyone()
__a = None
__a = None
__a = 0
__a = False
# Show the progress bar
__a = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a = data_dir_format(lowerCAmelCase__ )
assert os.path.exists(lowerCAmelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a = os.path.join(lowerCAmelCase__ , '''stage-1''' )
__a = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
arguments_dict.update({key: value} )
__a = os.path.join(lowerCAmelCase__ , '''best-checkpoint''' , lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , lowerCAmelCase__ , lowerCAmelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , lowerCAmelCase__ )
finetune(**lowerCAmelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , lowerCAmelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a = os.path.join(lowerCAmelCase__ , '''best-checkpoint''' )
__a = os.path.join(lowerCAmelCase__ , '''stage-2''' )
# Update arguments_dict
__a = model_path
__a = data_files['''train''']
__a = current_output_dir
__a = os.path.join(lowerCAmelCase__ , '''best-checkpoint''' , lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , lowerCAmelCase__ , lowerCAmelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , lowerCAmelCase__ )
finetune(**lowerCAmelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , lowerCAmelCase__ )
__a = iteration
__a = data_dir_format(iteration + 1 )
__a = AutoConfig.from_pretrained(os.path.join(lowerCAmelCase__ , '''best-checkpoint''' ) )
__a = config.idalabel
__a = os.path.join(lowerCAmelCase__ , '''eval_results_best-checkpoint.json''' )
__a = os.path.join(lowerCAmelCase__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(lowerCAmelCase__ )
with open(lowerCAmelCase__ , '''r''' ) as f:
__a = float(json.load(lowerCAmelCase__ )[args.eval_metric] )
__a = os.path.join(lowerCAmelCase__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(lowerCAmelCase__ )
# Loading the dataset from local csv or json files.
__a = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
__a = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowerCAmelCase__ ):
shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.wait_for_everyone()
__a = os.path.join(lowerCAmelCase__ , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a = eval_result
if best_iteration is None:
__a = new_iteration
__a = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a = new_iteration
__a = new_eval_result
__a = 0
else:
if new_eval_result == best_eval_result:
__a = new_iteration
__a = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , lowerCAmelCase__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase__ , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowerCAmelCase__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase__ , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowerCAmelCase__ , '''eval_results_best-iteration.json''' ) , )
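# Added sketch (illustrative, not part of the script): the confidence-threshold filtering that
# create_pseudo_labeled_data applies above, shown on an in-memory datasets.Dataset.
from datasets import Dataset

pseudo = Dataset.from_dict({"prediction": ["pos", "neg", "pos"], "probability": [0.97, 0.55, 0.91]})
confident = pseudo.filter(lambda example: example["probability"] > 0.9)
confident = confident.rename_column("prediction", "label")  # mirrors the column handling above
print(confident["label"])  # ['pos', 'pos']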
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
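# Added usage sketch (assumption: the class above corresponds to transformers.ViTMAEConfig;
# the model is randomly initialised, no checkpoint is downloaded).
from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig(hidden_size=768, num_hidden_layers=12, mask_ratio=0.75)
model = ViTMAEModel(config)
print(model.config.mask_ratio)  # 0.75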
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : str ) -> str:
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
__a = ''''''
while len(lowerCAmelCase__ ) % 3 != 0:
__a = '''0''' + bin_string
__a = [
bin_string[index : index + 3]
for index in range(len(lowerCAmelCase__ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__a = 0
for index, val in enumerate(lowerCAmelCase__ ):
oct_val += int(2 ** (2 - index) * int(lowerCAmelCase__ ) )
oct_string += str(lowerCAmelCase__ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
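# Added cross-check (values chosen here, not taken from the file): the conversion left-pads to a
# multiple of three bits, splits into 3-bit groups and weights each bit by 2 ** (2 - position).
bin_string = "111100"
groups = [bin_string[i : i + 3] for i in range(0, len(bin_string), 3)]  # ['111', '100']
octal = "".join(str(sum(int(bit) * 2 ** (2 - pos) for pos, bit in enumerate(g))) for g in groups)
print(octal)                        # 74
print(oct(int(bin_string, 2))[2:])  # 74, same result via the builtin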
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
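# Added usage sketch (assumptions: the class above is transformers.LayoutLMv3Processor, the Hub
# checkpoint name exists, and Tesseract is installed since apply_ocr defaults to True).
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
document_image = Image.new("RGB", (224, 224), "white")  # placeholder page image
encoding = processor(document_image, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']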
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'roc_bert'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=True , _a=0 , _a="absolute" , _a=None , _a=True , _a=True , _a=768 , _a=910 , _a=512 , _a=24_858 , _a=True , **_a , ):
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = enable_pronunciation
__a = enable_shape
__a = pronunciation_embed_dim
__a = pronunciation_vocab_size
__a = shape_embed_dim
__a = shape_vocab_size
__a = concat_input
__a = position_embedding_type
__a = classifier_dropout
super().__init__(pad_token_id=_a , **_a )
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
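# Added usage sketch (assumptions: the class above matches diffusers.KDPM2DiscreteScheduler and
# the zero tensor stands in for a real epsilon-predicting UNet output).
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in model prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])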
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowercase ( lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def lowercase ( lowerCAmelCase__ : str ) -> Dict:
# word like '180' or '身高' or '神'
for char in word:
__a = ord(lowerCAmelCase__ )
if not _is_chinese_char(lowerCAmelCase__ ):
return 0
return 1
def lowercase ( lowerCAmelCase__ : List[str] ) -> Optional[Any]:
__a = set()
for token in tokens:
__a = len(lowerCAmelCase__ ) > 1 and is_chinese(lowerCAmelCase__ )
if chinese_word:
word_set.add(lowerCAmelCase__ )
__a = list(lowerCAmelCase__ )
return word_list
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : set() ) -> int:
if not chinese_word_set:
return bert_tokens
__a = max([len(lowerCAmelCase__ ) for w in chinese_word_set] )
__a = bert_tokens
__a , __a = 0, len(lowerCAmelCase__ )
while start < end:
__a = True
if is_chinese(bert_word[start] ):
__a = min(end - start , lowerCAmelCase__ )
for i in range(lowerCAmelCase__ , 1 , -1 ):
__a = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__a = '''##''' + bert_word[j]
__a = start + i
__a = False
break
if single_word:
start += 1
return bert_word
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : LTP , lowerCAmelCase__ : BertTokenizer ) -> Dict:
__a = []
for i in range(0 , len(lowerCAmelCase__ ) , 100 ):
__a = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws
__a = [get_chinese_word(lowerCAmelCase__ ) for r in res]
ltp_res.extend(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
__a = []
for i in range(0 , len(lowerCAmelCase__ ) , 100 ):
__a = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
__a = []
for input_ids, chinese_word in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = []
for id in input_ids:
__a = bert_tokenizer._convert_id_to_token(lowerCAmelCase__ )
input_tokens.append(lowerCAmelCase__ )
__a = add_sub_symbol(lowerCAmelCase__ , lowerCAmelCase__ )
__a = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(lowerCAmelCase__ ):
if token[:2] == "##":
__a = token[2:]
# save chinese tokens' pos
if len(lowerCAmelCase__ ) == 1 and _is_chinese_char(ord(lowerCAmelCase__ ) ):
ref_id.append(lowerCAmelCase__ )
ref_ids.append(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
return ref_ids
def lowercase ( lowerCAmelCase__ : List[str] ) -> int:
    # For Chinese (Ro)Bert, the best result comes from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
__a = f.readlines()
__a = [line.strip() for line in data if len(lowerCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__a = LTP(args.ltp ) # faster in GPU device
__a = BertTokenizer.from_pretrained(args.bert )
__a = prepare_ref(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
__a = [json.dumps(lowerCAmelCase__ ) + '''\n''' for ref in ref_ids]
f.writelines(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
lowercase_ = parser.parse_args()
main(args)
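# Added illustration (characters chosen here, not taken from the script): the Unicode-range
# test performed by _is_chinese_char above, shown for the main CJK Unified Ideographs block.
for ch in ["中", "A", "。"]:
    print(ch, 0x4E00 <= ord(ch) <= 0x9FFF)  # True, False, False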
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowercase ( lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
return field(default_factory=lambda: default , metadata=lowerCAmelCase__ )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = field(
metadata={'help': 'The csv file to plot.'} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Disable logarithmic scale when plotting'} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
__UpperCAmelCase : Optional[List[str]] = list_field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def lowercase ( lowerCAmelCase__ : Dict ) -> Any:
try:
int(lowerCAmelCase__ )
return True
except ValueError:
return False
def lowercase ( lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
try:
float(lowerCAmelCase__ )
return True
except ValueError:
return False
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = args
__a = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
__a = csv.DictReader(_a )
for row in reader:
__a = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
__a = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
__a = float(row['''result'''] )
def __UpperCAmelCase ( self ):
__a , __a = plt.subplots()
__a = '''Time usage''' if self.args.is_time else '''Memory usage'''
__a = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__a = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
__a = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
__a = self.result_dict[model_name]['''result''']
((__a) , (__a)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__a = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__a = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_a , )
else:
__a = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__a) , (__a)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
__a = np.asarray(_a , _a )[: len(_a )]
plt.scatter(
_a , _a , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(_a , _a , '''--''' )
title_str += f''' {label_model_name} vs.'''
__a = title_str[:-4]
__a = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_a )
plt.xlabel(_a )
plt.ylabel(_a )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowercase ( ) -> List[str]:
__a = HfArgumentParser(lowerCAmelCase__ )
__a = parser.parse_args_into_dataclasses()[0]
__a = Plot(args=lowerCAmelCase__ )
plot.plot()
if __name__ == "__main__":
main()
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
"""simple docstring"""
from __future__ import annotations
lowercase_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowercase ( lowerCAmelCase__ : list[list[int]] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
__a = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase__ ) )
] # the reference grid
__a = 1
__a = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase__ ) )
] # the action grid
__a = init[0]
__a = init[1]
__a = 0
__a = g + heuristic[x][y] # cost from starting cell to destination cell
__a = [[f, g, x, y]]
__a = False # flag that is set when search is complete
__a = False # flag set if we can't find expand
while not found and not resign:
if len(lowerCAmelCase__ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__a = cell.pop()
__a = next_cell[2]
__a = next_cell[3]
__a = next_cell[1]
if x == goal[0] and y == goal[1]:
__a = True
else:
for i in range(len(lowerCAmelCase__ ) ): # to try out different valid actions
__a = x + DIRECTIONS[i][0]
__a = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(lowerCAmelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__a = g + cost
__a = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__a = 1
__a = i
__a = []
__a = goal[0]
__a = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__a = x - DIRECTIONS[action[x][y]][0]
__a = y - DIRECTIONS[action[x][y]][1]
__a = xa
__a = ya
invpath.append([x, y] )
__a = []
for i in range(len(lowerCAmelCase__ ) ):
path.append(invpath[len(lowerCAmelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowercase_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowercase_ = [0, 0]
# all coordinates are given in format [y,x]
lowercase_ = [len(grid) - 1, len(grid[0]) - 1]
lowercase_ = 1
# the cost map which pushes the path closer to the goal
lowercase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowercase_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowercase_ = 9_9
lowercase_ , lowercase_ = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
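# Added usage sketch (assumptions: the pipeline above is diffusers.LDMSuperResolutionPipeline and
# the Hub checkpoint below exists; the blank image is only a placeholder low-resolution input).
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.new("RGB", (128, 128))
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")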
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase ( ) -> Optional[int]:
__a = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=lowerCAmelCase__ , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=lowerCAmelCase__ , default=5 )
parser.add_argument('''--batch_size''' , type=lowerCAmelCase__ , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=lowerCAmelCase__ , default=1 )
parser.add_argument('''--freeze''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument('''--learning_rate''' , type=lowerCAmelCase__ , default=5e-4 )
parser.add_argument('''--seed''' , type=lowerCAmelCase__ , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=lowerCAmelCase__ , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=lowerCAmelCase__ , default=10 )
parser.add_argument('''--weight_decay''' , type=lowerCAmelCase__ , default=0.01 )
parser.add_argument('''--output_dir''' , type=lowerCAmelCase__ , default='''./results''' )
return parser.parse_args()
lowercase_ = load("accuracy")
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> List[str]:
__a , __a = eval_pred
__a = np.argmax(lowerCAmelCase__ , axis=1 )
return metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
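# Quick illustration (added for clarity, not in the original file): `eval_pred` is a
# (logits, labels) pair, so the argmax over axis 1 picks the predicted class before the
# accuracy metric is computed.
#   >>> import numpy as np
#   >>> logits = np.array([[0.1, 0.9], [0.8, 0.2]])
#   >>> labels = np.array([1, 0])
#   >>> np.argmax(logits, axis=1)
#   array([1, 0])
#   # together with `labels` this yields an accuracy of 1.0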
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__()
__a = trainer
def __UpperCAmelCase ( self , _a , _a , _a , **_a ):
if control.should_evaluate:
__a = deepcopy(_a )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowercase ( ) -> List[str]:
__a = get_args()
set_seed(args.seed )
__a = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
__a = dataset.train_test_split(test_size=0.2 )
__a = train_test['''test'''].train_test_split(test_size=0.5 )
__a = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
__a = AutoTokenizer.from_pretrained(args.model_ckpt )
__a = tokenizer.eos_token
__a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__a = False
__a = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(lowerCAmelCase__ : int ):
__a = tokenizer(example['''src'''] , truncation=lowerCAmelCase__ , max_length=1024 )
        __a = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__a = train_test_validation.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=train_test_validation['''train'''].column_names , )
__a = DataCollatorWithPadding(tokenizer=lowerCAmelCase__ )
__a = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
__a = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , )
print('''Training...''' )
trainer.add_callback(CustomCallback(lowerCAmelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 11 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = '''hf-internal-testing/tiny-random-t5'''
__a = AutoTokenizer.from_pretrained(_a )
__a = AutoModelForSeqaSeqLM.from_pretrained(_a )
__a = tokenizer('''This is me''' , return_tensors='''pt''' )
__a = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__a = model.generate(**_a )
__a = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
__a = AutoModelForSeqaSeqLM.from_pretrained(_a )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__a = model_reloaded.generate(**_a )
self.assertTrue(torch.allclose(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = '''hf-internal-testing/tiny-random-t5'''
__a = AutoModelForSeqaSeqLM.from_pretrained(_a )
__a = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_a ):
model.save_pretrained(_a )
__a = model.reverse_bettertransformer()
model.save_pretrained(_a )
| 11 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase ( lowerCAmelCase__ : dict ) -> tuple:
return (data["data"], data["target"])
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ) -> np.ndarray:
__a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase__ , lowerCAmelCase__ )
# Predict target for test data
__a = xgb.predict(lowerCAmelCase__ )
__a = predictions.reshape(len(lowerCAmelCase__ ) , 1 )
return predictions
def lowercase ( ) -> None:
__a = fetch_california_housing()
__a , __a = data_handling(lowerCAmelCase__ )
__a , __a , __a , __a = train_test_split(
lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 )
__a = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ = object()
# For specifying empty leaf dict `{}`
lowercase_ = object()
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> Optional[int]:
__a = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(lowerCAmelCase__ ) - len(lowerCAmelCase__ ) + 1 ):
__a = [x.match(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , ks[i:] )]
if matches and all(lowerCAmelCase__ ):
return True
return False
def lowercase ( lowerCAmelCase__ : List[Any] ) -> Any:
def replace(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any ):
for rule, replacement in rules:
if _match(lowerCAmelCase__ , lowerCAmelCase__ ):
return replacement
return val
return replace
def lowercase ( ) -> List[Any]:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , lowerCAmelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , lowerCAmelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a = _get_partition_rules()
__a = _replacement_rules(lowerCAmelCase__ )
__a = {k: _unmatched for k in flatten_dict(lowerCAmelCase__ )}
__a = {k: replace(lowerCAmelCase__ , lowerCAmelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowerCAmelCase__ ) )
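# Illustrative example (not in the original file): a rule is a tuple of regexes that must
# match a contiguous window of the flattened parameter key; each regex is anchored with a
# trailing "$", so it has to cover a whole key segment. Using the matching helper referred
# to as `_match` above:
#   >>> _match(("attention", "out_proj", "kernel"),
#   ...        ("transformer", "h", "0", "attention", "out_proj", "kernel"))
#   True
#   >>> _match(("mlp", "c_fc", "bias"),
#   ...        ("transformer", "h", "0", "attention", "out_proj", "kernel"))
#   False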
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'gpt_neo'
__UpperCAmelCase : Optional[Any] = ['past_key_values']
__UpperCAmelCase : Dict = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , _a=50_257 , _a=2_048 , _a=2_048 , _a=24 , _a=[[["global", "local"], 12]] , _a=16 , _a=None , _a=256 , _a="gelu_new" , _a=0.0 , _a=0.0 , _a=0.0 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=50_256 , _a=50_256 , **_a , ):
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_layers
__a = num_heads
__a = intermediate_size
__a = window_size
__a = activation_function
__a = resid_dropout
__a = embed_dropout
__a = attention_dropout
__a = classifier_dropout
__a = layer_norm_epsilon
__a = initializer_range
__a = use_cache
__a = bos_token_id
__a = eos_token_id
__a = attention_types
__a = self.expand_attention_types_params(_a )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
@staticmethod
def __UpperCAmelCase ( _a ):
__a = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
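# Illustrative note (added for clarity, not in the original file): the static helper above
# expands the compact `attention_types` spec into one entry per layer; the default
# [[["global", "local"], 12]] therefore becomes an alternating list of 24 entries, which
# matches the default num_layers=24.
#   >>> pattern, repeat = ["global", "local"], 12
#   >>> layers = pattern * repeat
#   >>> len(layers), layers[:4]
#   (24, ['global', 'local', 'global', 'local'])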
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ) -> int:
import torch
__a = input.size()
__a = len(lowerCAmelCase__ )
__a = shape[dimension]
__a = torch.arange(0 , lowerCAmelCase__ , lowerCAmelCase__ )
__a = torch.div(sizedim - size , lowerCAmelCase__ , rounding_mode='''floor''' ) + 1
__a = torch.arange(lowerCAmelCase__ ) + low_indices[:min_length][:, None]
__a = [slice(lowerCAmelCase__ )] * rank
__a = indices
__a = input[s]
__a = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCAmelCase__ )
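# Illustrative check (assumption, not in the original file): the helper above is meant to
# mirror torch.Tensor.unfold, producing sliding windows of `size` elements taken every
# `step` along `dimension`:
#   >>> import torch
#   >>> torch.arange(6).unfold(0, 2, 2)
#   tensor([[0, 1],
#           [2, 3],
#           [4, 5]])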
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
import torch
__a = torch.arange(1 , lowerCAmelCase__ )
__a = torch.remainder(lowerCAmelCase__ , lowerCAmelCase__ )
__a = remainders == 0
__a = candidates[divisor_indices]
__a = torch.max(lowerCAmelCase__ )
return largest_divisor, torch.div(lowerCAmelCase__ , lowerCAmelCase__ , rounding_mode='''floor''' )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
__a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_a , direction='''inputs''' )
__a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __UpperCAmelCase ( self ):
return self._config.num_heads
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ):
__a = super(_a , self ).generate_dummy_inputs(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
        # We need to order the inputs in the way they appear in the forward()
__a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__a , __a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__a = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers )
]
__a = common_inputs['''attention_mask''']
if self.use_past:
__a = ordered_inputs['''attention_mask'''].dtype
__a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_a , _a , dtype=_a )] , dim=1 )
return ordered_inputs
@property
def __UpperCAmelCase ( self ):
return 13
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
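# Illustrative note (added for clarity; the class name FunnelConfig is assumed from the
# 'funnel' model_type above): with the default block_sizes=[4, 4, 4], the read-only
# properties defined above give
#   >>> config = FunnelConfig()
#   >>> config.num_hidden_layers   # sum(block_sizes)
#   12
#   >>> config.num_blocks          # len(block_sizes)
#   3
# Both can only be changed indirectly, by setting `block_sizes`.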
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
lowercase_ = "#"
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
__a = {}
def __UpperCAmelCase ( self , _a ):
__a = self._trie
for char in text:
if char not in trie:
__a = {}
__a = trie[char]
__a = True
def __UpperCAmelCase ( self , _a ):
__a = self._trie
for char in prefix:
if char in trie:
__a = trie[char]
else:
return []
return self._elements(_a )
def __UpperCAmelCase ( self , _a ):
__a = []
for c, v in d.items():
__a = [''' '''] if c == END else [(c + s) for s in self._elements(_a )]
result.extend(_a )
return tuple(_a )
lowercase_ = Trie()
lowercase_ = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def lowercase ( lowerCAmelCase__ : str ) -> tuple:
__a = trie.find_word(lowerCAmelCase__ )
return tuple(string + word for word in suffixes )
def lowercase ( ) -> None:
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 11 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> str:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
lowercase_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
lowercase_ = typing.Union[np.floataa, int, float] # noqa: UP007
def lowercase ( lowerCAmelCase__ : Vector , lowerCAmelCase__ : Vector ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(lowerCAmelCase__ ) - np.asarray(lowerCAmelCase__ )) ** 2 ) )
def lowercase ( lowerCAmelCase__ : Vector , lowerCAmelCase__ : Vector ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ** (1 / 2)
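# Illustrative check (added for clarity): both helpers are intended to compute the ordinary
# Euclidean distance, with and without NumPy; the names below are the ones used in the
# timing benchmark further down, and the expected results assume the intended
# implementations.
#   >>> euclidean_distance([0, 0], [3, 4])
#   5.0
#   >>> euclidean_distance_no_np([0, 0], [3, 4])
#   5.0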
if __name__ == "__main__":
def lowercase ( ) -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )
benchmark()
| 11 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 | 1 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
lowercase_ = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
lowercase_ = {
"jukebox": 5_1_2,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : str = PRETRAINED_LYRIC_TOKENS_SIZES
__UpperCAmelCase : Dict = ['input_ids', 'attention_mask']
def __init__( self , _a , _a , _a , _a=["v3", "v2", "v2"] , _a=512 , _a=5 , _a="<|endoftext|>" , **_a , ):
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
super().__init__(
unk_token=_a , n_genres=_a , version=_a , max_n_lyric_tokens=_a , **_a , )
__a = version
__a = max_n_lyric_tokens
__a = n_genres
with open(_a , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_a )
with open(_a , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_a )
with open(_a , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_a )
__a = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # In v2 the vocabulary had n_vocab=80 characters; v3 dropped the '+' character, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__a = oov.replace(R'''\-\'''' , R'''\-+\'''' )
__a = regex.compile(_a )
__a = {v: k for k, v in self.artists_encoder.items()}
__a = {v: k for k, v in self.genres_encoder.items()}
__a = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __UpperCAmelCase ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __UpperCAmelCase ( self ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = [self.artists_encoder.get(_a , 0 ) for artist in list_artists]
for genres in range(len(_a ) ):
__a = [self.genres_encoder.get(_a , 0 ) for genre in list_genres[genres]]
__a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__a = [[self.lyrics_encoder.get(_a , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __UpperCAmelCase ( self , _a ):
return list(_a )
def __UpperCAmelCase ( self , _a , _a , _a , **_a ):
__a , __a , __a = self.prepare_for_tokenization(_a , _a , _a )
__a = self._tokenize(_a )
return artist, genre, lyrics
def __UpperCAmelCase ( self , _a , _a , _a , _a = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__a = artists[idx].lower()
__a = [genres[idx].lower()]
else:
__a = self._normalize(artists[idx] ) + '''.v2'''
__a = [
self._normalize(_a ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__a = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
__a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
__a = {vocab[index]: index + 1 for index in range(len(_a ) )}
__a = 0
__a = len(_a ) + 1
__a = self.vocab
__a = {v: k for k, v in self.vocab.items()}
__a = ''''''
else:
__a = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
__a = self._run_strip_accents(_a )
__a = lyrics.replace('''\\''' , '''\n''' )
__a = self.out_of_vocab.sub('''''' , _a ), [], []
return artists, genres, lyrics
def __UpperCAmelCase ( self , _a ):
__a = unicodedata.normalize('''NFD''' , _a )
__a = []
for char in text:
__a = unicodedata.category(_a )
if cat == "Mn":
continue
output.append(_a )
return "".join(_a )
def __UpperCAmelCase ( self , _a ):
__a = (
[chr(_a ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(_a ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(_a ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
__a = frozenset(_a )
__a = re.compile(R'''_+''' )
__a = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
__a = pattern.sub('''_''' , _a ).strip('''_''' )
return text
def __UpperCAmelCase ( self , _a ):
return " ".join(_a )
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
# Convert to TensorType
if not isinstance(_a , _a ):
__a = TensorType(_a )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
__a = tf.constant
__a = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
__a = torch.tensor
__a = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
__a = jnp.array
__a = _is_jax
else:
__a = np.asarray
__a = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__a = [inputs]
if not is_tensor(_a ):
__a = as_tensor(_a )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self , _a , _a , _a="" , _a="pt" ):
__a = [0, 0, 0]
__a = [artist] * len(self.version )
__a = [genres] * len(self.version )
__a , __a , __a = self.tokenize(_a , _a , _a )
__a , __a , __a = self._convert_token_to_id(_a , _a , _a )
__a = [-INFINITY] * len(full_tokens[-1] )
__a = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_a )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_a ) )
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_a ) )
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_a ) )
return (artists_file, genres_file, lyrics_file)
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = self.artists_decoder.get(_a )
__a = [self.genres_decoder.get(_a ) for genre in genres_index]
__a = [self.lyrics_decoder.get(_a ) for character in lyric_index]
return artist, genres, lyrics
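# Illustrative usage sketch (assumption; the checkpoint name is not taken from this file):
#   >>> from transformers import JukeboxTokenizer
#   >>> tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   >>> encoded = tokenizer("Alan Jackson", "Country Rock", "old town road")
#   >>> sorted(encoded.keys())
#   ['attention_masks', 'input_ids']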
| 11 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -x[0] )
]
return result
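# Illustrative usage sketch (assumption; the checkpoint name below is not taken from this
# file): the pipeline scores an audio clip against free-form candidate labels with a
# CLAP-style model and returns them sorted by score.
#   >>> from transformers import pipeline
#   >>> classifier = pipeline(task="zero-shot-audio-classification",
#   ...                       model="laion/clap-htsat-unfused")
#   >>> classifier("dog_bark.wav",
#   ...            candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#   # -> [{"score": ..., "label": "Sound of a dog"}, {"score": ..., "label": "Sound of vacuum cleaner"}]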
| 11 | 1 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a=768 ):
super().__init__(_a )
__a = proj_size
__a = CLIPVisionModel(_a )
__a = PaintByExampleMapper(_a )
__a = nn.LayerNorm(config.hidden_size )
__a = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__a = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def __UpperCAmelCase ( self , _a , _a=False ):
__a = self.model(pixel_values=_a )
__a = clip_output.pooler_output
__a = self.mapper(latent_states[:, None] )
__a = self.final_layer_norm(_a )
__a = self.proj_out(_a )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__()
__a = (config.num_hidden_layers + 1) // 5
__a = config.hidden_size
__a = 1
__a = nn.ModuleList(
[
BasicTransformerBlock(_a , _a , _a , activation_fn='''gelu''' , attention_bias=_a )
for _ in range(_a )
] )
def __UpperCAmelCase ( self , _a ):
for block in self.blocks:
__a = block(_a )
return hidden_states
| 11 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
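# --- Added illustration (not part of the original test suite) -----------------
# Minimal sketch of running the benchmark utilities exercised above outside of
# unittest. The argument names mirror the calls in the tests; the exact values
# chosen here are assumptions for illustration only.
def _benchmark_sketch():
    benchmark_args = PyTorchBenchmarkArguments(
        models=['''sshleifer/tiny-gpt2'''] ,
        training=False ,
        inference=True ,
        sequence_lengths=[8] ,
        batch_sizes=[1] ,
        multi_process=False ,
    )
    benchmark = PyTorchBenchmark(benchmark_args )
    results = benchmark.run()
    return results.time_inference_result , results.memory_inference_result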
| 11 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
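# --- Added usage sketch (not part of the original module) ---------------------
# Shows the derived defaults encoded above: attention_hidden_size falls back to
# hidden_size and intermediate_size to 4 * hidden_size. `RwkvConfig` is the
# upstream transformers name for this configuration class and is assumed here
# because the local definition uses an obfuscated class name.
def _rwkv_config_sketch():
    from transformers import RwkvConfig
    config = RwkvConfig(hidden_size=512 , num_hidden_layers=12 )
    assert config.attention_hidden_size == 512
    assert config.intermediate_size == 4 * 512
    return config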
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
lowercase_ = re.compile(r"([A-Z]+)([A-Z][a-z])")
lowercase_ = re.compile(r"([a-z\d])([A-Z])")
lowercase_ = re.compile(r"(?<!_)_(?!_)")
lowercase_ = re.compile(r"(_{2,})")
lowercase_ = r"^\w+(\.\w+)*$"
lowercase_ = r"<>:/\|?*"
def lowercase ( lowerCAmelCase__ : List[Any] ) -> Optional[int]:
__a = _uppercase_uppercase_re.sub(r'''\1_\2''' , lowerCAmelCase__ )
__a = _lowercase_uppercase_re.sub(r'''\1_\2''' , lowerCAmelCase__ )
return name.lower()
def lowercase ( lowerCAmelCase__ : Tuple ) -> Union[str, Any]:
__a = _single_underscore_re.split(lowerCAmelCase__ )
__a = [_multiple_underscores_re.split(lowerCAmelCase__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCAmelCase__ ) if n != '''''' )
def lowercase ( lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
if os.path.basename(lowerCAmelCase__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
if os.path.basename(lowerCAmelCase__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , lowerCAmelCase__ ):
raise ValueError(f'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(lowerCAmelCase__ )}-{split}'''
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=None ) -> Dict:
__a = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__ )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
__a = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
return f'''{filepath}*'''
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Any=None ) -> Optional[int]:
__a = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__ )
__a = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
if shard_lengths:
__a = len(lowerCAmelCase__ )
__a = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowerCAmelCase__ )]
if filetype_suffix:
__a = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
__a = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
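# --- Added usage sketch (not part of the original module) ---------------------
# Illustrates what the helpers above produce for a toy dataset name and split;
# the expected values in the comments follow directly from the string formatting
# in the functions.
def _naming_sketch():
    print(camelcase_to_snakecase('''SomeDatasetName''' ))  # some_dataset_name
    print(filename_prefix_for_split('''SomeDatasetName''' , '''train''' ))  # some_dataset_name-train
    print(filenames_for_dataset_split('''/data''' , '''SomeDatasetName''' , '''train''' , filetype_suffix='''arrow''' , shard_lengths=[100, 100] ))
    # ['/data/some_dataset_name-train-00000-of-00002.arrow',
    #  '/data/some_dataset_name-train-00001-of-00002.arrow']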
| 11 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
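# --- Added illustration (not part of the original tests) ----------------------
# Minimal sketch of the accumulate-then-apply pattern the tests above exercise:
# gradients passed to the accumulator are summed across calls, applied once via
# the optimizer, and cleared with `reset()`. The values are illustrative only.
def _accumulation_sketch():
    accumulator = GradientAccumulator()
    variable = tf.Variable([1.0, 2.0] )
    accumulator([tf.constant([0.5, 0.5] )] )
    accumulator([tf.constant([0.5, 0.5] )] )
    optimizer, _ = create_optimizer(5E-5 , 10 , 5 )
    optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
    accumulator.reset()
    return variable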
| 11 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
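# --- Added illustration (not part of the original module) ---------------------
# Worked numbers implied by the defaults above: a 224x224 image split into 16x16
# patches gives (224 // 16) ** 2 = 196 patches, and mask_ratio=0.75 means about
# int(196 * 0.75) = 147 patches are masked during MAE pre-training.
def _mae_masking_sketch(image_size=224 , patch_size=16 , mask_ratio=0.75 ):
    num_patches = (image_size // patch_size) ** 2
    num_masked = int(num_patches * mask_ratio )
    return num_patches, num_masked  # (196, 147)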
| 11 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
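# --- Added usage sketch (not part of the original module) ---------------------
# Illustrates the source/target language handling implemented above, assuming
# the upstream class name MBartTokenizerFast and a public mBART checkpoint.
def _mbart_translation_sketch():
    from transformers import MBartTokenizerFast
    tokenizer = MBartTokenizerFast.from_pretrained(
        '''facebook/mbart-large-en-ro''' , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
    batch = tokenizer('''UN Chief Says There Is No Military Solution in Syria''' , return_tensors='''pt''' )
    # Source ids end with </s> followed by the source language code, mirroring
    # set_src_lang_special_tokens above.
    return batch['''input_ids''']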
| 11 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a=2 , _a=3 , _a=64 , _a=None ):
__a = np.random.default_rng(_a )
__a = length
__a = rng.normal(size=(length,) ).astype(np.floataa )
__a = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ):
return self.length
def __getitem__( self , _a ):
return {"x": self.x[i], "y": self.y[i]}
class __lowerCAmelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , _a=0 , _a=0 , _a=False ):
super().__init__()
__a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__a = True
def __UpperCAmelCase ( self , _a=None ):
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
__a = False
return x * self.a[0] + self.b[0]
class __lowerCAmelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , _a=0 , _a=0 , _a=False ):
super().__init__()
__a = torch.nn.Parameter(torch.tensor(_a ).float() )
__a = torch.nn.Parameter(torch.tensor(_a ).float() )
__a = True
def __UpperCAmelCase ( self , _a=None ):
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
__a = False
return x * self.a + self.b
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int = 16 ) -> List[str]:
from datasets import load_dataset
from transformers import AutoTokenizer
__a = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__a = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
__a = load_dataset('''csv''' , data_files=lowerCAmelCase__ )
__a = datasets['''train'''].unique('''label''' )
__a = {v: i for i, v in enumerate(lowerCAmelCase__ )}
def tokenize_function(lowerCAmelCase__ : int ):
# max_length=None => use the model max length (it's actually the default)
__a = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' )
if "label" in examples:
__a = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__a = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(lowerCAmelCase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__a = DataLoader(tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 )
__a = DataLoader(tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
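# --- Added illustration (not part of the original module) ---------------------
# Self-contained sketch of the synthetic regression data the dataset class above
# generates (y = a * x + b + noise) and how it is typically batched; all names
# here are local to the sketch.
def _regression_batches_sketch(a=2.0 , b=3.0 , length=64 , batch_size=16 ):
    rng = np.random.default_rng(0 )
    x = rng.normal(size=(length,) ).astype(np.float32 )
    y = a * x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    samples = [{'''x''': xi, '''y''': yi} for xi, yi in zip(x , y )]
    loader = DataLoader(samples , batch_size=batch_size )
    return next(iter(loader ) )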
| 11 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
lowercase_ = tuple[int, int]
class Graph:
    """A weighted, undirected graph stored as a vertex set and an edge->weight map."""
    def __init__( self , vertices , edges ):
        self.vertices = vertices
        self.edges = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight ):
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ):
        # Grow a minimum spanning tree: start from one vertex and repeatedly add
        # the cheapest edge that connects the current subgraph to a new vertex.
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge = None
        min_weight = None
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( filename : str = "p107_network.txt" ) -> int:
    script_directory = os.path.abspath(os.path.dirname(__file__ ) )
    network_file = os.path.join(script_directory , filename )
    edges = {}
    with open(network_file ) as f:
        data = f.read().strip().split('''\n''' )
    adjaceny_matrix = [line.split(''',''' ) for line in data]
    for edgea in range(1 , len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb] )
    graph = Graph(set(range(len(adjaceny_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
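# --- Added example (not part of the original solution) ------------------------
# Tiny hand-checked instance of the Graph/prims_algorithm code above: a triangle
# with edge weights 1, 2 and 3 has a minimum spanning tree of weight 3, so the
# saving over the full graph (total weight 6) is 3.
def _prims_example() -> int:
    graph = Graph({0, 1, 2} , {(0, 1): 1, (1, 2): 2, (0, 2): 3} )
    subgraph = graph.prims_algorithm()
    return sum(graph.edges.values() ) - sum(subgraph.edges.values() )  # 6 - 3 = 3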
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase_ = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in every
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
        return
    # We iterate over each column in the row to find all possible results for each row
    for col in range(n ):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is, the two queens collide vertically. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If all checks pass we call the dfs function again with the updated inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n : int ) -> None:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
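# --- Added illustration (not part of the original module) ---------------------
# Same search as above but returning only the number of solutions; the counts
# for small boards are the well-known values 2, 10, 4, 40 and 92 for n = 4..8.
def _count_solutions(n: int ) -> int:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    return len(boards )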
| 11 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
lowercase_ = logging.getLogger(__name__)
def lowercase ( lowerCAmelCase__ : str ) -> Optional[int]:
__a = git.Repo(search_parent_directories=lowerCAmelCase__ )
__a = {
'''repo_id''': str(lowerCAmelCase__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(lowerCAmelCase__ , '''git_log.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=4 )
def lowercase ( lowerCAmelCase__ : int ) -> List[str]:
if params.n_gpu <= 0:
__a = 0
__a = -1
__a = True
__a = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
__a = int(os.environ['''WORLD_SIZE'''] )
__a = int(os.environ['''N_GPU_NODE'''] )
__a = int(os.environ['''RANK'''] )
# number of nodes / node ID
__a = params.world_size // params.n_gpu_per_node
__a = params.global_rank // params.n_gpu_per_node
__a = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
__a = 1
__a = 0
__a = 0
__a = 0
__a = 1
__a = 1
__a = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__a = params.node_id == 0 and params.local_rank == 0
__a = params.n_nodes > 1
# summary
__a = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def lowercase ( lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 11 | 1 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowercase_ = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
lowercase_ = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
lowercase_ = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
lowercase_ = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
lowercase_ = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 1_4]),
("2H 5D 3C AS 5S", False, [1_4, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
lowercase_ = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
lowercase_ = (
("JH AH TH KH QH", 2_3),
("JH 9H TH KH QH", 2_2),
("JC KH JS JD JH", 2_1),
("KH KC 3S 3H 3D", 2_0),
("8C 9C 5C 3C TC", 1_9),
("JS QS 9H TS KH", 1_8),
("7C 7S KH 2H 7H", 1_7),
("3C KH 5D 5S KH", 1_6),
("QH 8H KD JH 8S", 1_5),
("2D 6D 9D TH 7D", 1_4),
)
def lowercase ( ) -> Optional[Any]:
__a , __a = randrange(len(lowerCAmelCase__ ) ), randrange(len(lowerCAmelCase__ ) )
__a = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
__a , __a = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowercase ( lowerCAmelCase__ : int = 100 ) -> Union[str, Any]:
return (generate_random_hand() for _ in range(lowerCAmelCase__ ))
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Optional[Any]:
assert PokerHand(lowerCAmelCase__ )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] ) -> Any:
assert PokerHand(lowerCAmelCase__ )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] ) -> int:
__a = PokerHand(lowerCAmelCase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] ) -> Tuple:
assert PokerHand(lowerCAmelCase__ )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ) -> Tuple:
assert PokerHand(lowerCAmelCase__ )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Optional[int]:
assert PokerHand(lowerCAmelCase__ ).compare_with(PokerHand(lowerCAmelCase__ ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] ) -> Dict:
assert PokerHand(lowerCAmelCase__ ).compare_with(PokerHand(lowerCAmelCase__ ) ) == expected
def lowercase ( ) -> str:
__a = [PokerHand(lowerCAmelCase__ ) for hand in SORTED_HANDS]
__a = poker_hands.copy()
shuffle(lowerCAmelCase__ )
__a = chain(sorted(lowerCAmelCase__ ) )
for index, hand in enumerate(lowerCAmelCase__ ):
assert hand == poker_hands[index]
def lowercase ( ) -> Tuple:
# Test that five high straights are compared correctly.
__a = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=lowerCAmelCase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase ( ) -> Optional[Any]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
__a = PokerHand('''2C 4S AS 3D 5C''' )
__a = True
__a = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowercase ( ) -> str:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a = 0
__a = os.path.abspath(os.path.dirname(lowerCAmelCase__ ) )
__a = os.path.join(lowerCAmelCase__ , '''poker_hands.txt''' )
with open(lowerCAmelCase__ ) as file_hand:
for line in file_hand:
__a = line[:14].strip()
__a = line[15:].strip()
__a , __a = PokerHand(lowerCAmelCase__ ), PokerHand(lowerCAmelCase__ )
__a = player.compare_with(lowerCAmelCase__ )
if output == "Win":
answer += 1
assert answer == 376
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
| 11 | 1 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = BioGptModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = BioGptForCausalLM(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = BioGptModel(config=_a )
model.to(_a )
model.eval()
# create attention mask
__a = torch.ones(input_ids.shape , dtype=torch.long , device=_a )
__a = self.seq_length // 2
__a = 0
# first forward pass
__a , __a = model(_a , attention_mask=_a ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__a = ids_tensor((1,) , _a ).item() + 1
__a = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__a = random_other_next_tokens
# append to next input_ids and attn_mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_a )] , dim=1 , )
# get two different outputs
__a = model(_a , attention_mask=_a )['''last_hidden_state''']
__a = model(_a , past_key_values=_a , attention_mask=_a )['''last_hidden_state''']
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = BioGptModel(config=_a ).to(_a ).eval()
__a = torch.ones(input_ids.shape , dtype=torch.long , device=_a )
# first forward pass
__a = model(_a , attention_mask=_a , use_cache=_a )
__a , __a = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__a = model(_a , attention_mask=_a )['''last_hidden_state''']
__a = model(_a , attention_mask=_a , past_key_values=_a )[
'''last_hidden_state'''
]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a , _a=False ):
__a = BioGptForCausalLM(_a )
model.to(_a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __UpperCAmelCase ( self , _a , *_a ):
__a = BioGptModel(_a )
__a = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = self.num_labels
__a = BioGptForTokenClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__UpperCAmelCase : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase : str = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
def __UpperCAmelCase ( self ):
__a = BioGptModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_a , gradient_checkpointing=_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
__a = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_a )
__a = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__a = '''left'''
# Define PAD Token = EOS Token = 50256
__a = tokenizer.eos_token
__a = model.config.eos_token_id
# use different length sentences to test batching
__a = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__a = tokenizer(_a , return_tensors='''pt''' , padding=_a )
__a = inputs['''input_ids'''].to(_a )
__a = model.generate(
input_ids=_a , attention_mask=inputs['''attention_mask'''].to(_a ) , )
__a = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_a )
__a = model.generate(input_ids=_a )
__a = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__a = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_a )
__a = model.generate(input_ids=_a , max_length=model.config.max_length - num_paddings )
__a = tokenizer.batch_decode(_a , skip_special_tokens=_a )
__a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_a )
__a = tokenizer.decode(output_padded[0] , skip_special_tokens=_a )
__a = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , [non_padded_sentence, padded_sentence] )
@slow
def __UpperCAmelCase ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = BioGptModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict['''input_ids''']
__a = input_ids.ne(1 ).to(_a )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = BioGptForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = '''multi_label_classification'''
__a = input_dict['''input_ids''']
__a = input_ids.ne(1 ).to(_a )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = BioGptForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
__a = torch.tensor([[2, 4_805, 9, 656, 21]] )
__a = model(_a )[0]
__a = 42_384
__a = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__a = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_a )
torch.manual_seed(0 )
__a = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_a )
__a = model.generate(
**_a , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=_a , )
__a = tokenizer.decode(output_ids[0] , skip_special_tokens=_a )
__a = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_a , _a )
| 11 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 11 | 1 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    # Kinetic energy of a body: 0.5 * m * |v|^2 (the sign of the velocity does not matter).
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''' )
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
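    # Illustrative spot check (assumes the kinetic_energy helper renamed above):
    # a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5**2 = 125 J.
    assert kinetic_energy(10, 5) == 125.0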
| 11 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    # Build a beta schedule so that the cumulative product of (1 - beta) follows the chosen
    # alpha-bar curve, capping each beta at `max_beta` for numerical stability.
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
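# Illustrative usage (hypothetical numbers): betas_for_alpha_bar(1000) returns a length-1000
# tensor of betas derived from the squared-cosine alpha-bar curve, which the scheduler class
# below consumes when beta_schedule == "squaredcos_cap_v2".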
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
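        # (Dividing by sqrt(sigma**2 + 1) is the usual k-diffusion input preconditioning: it keeps
        # the variance of the noisy sample roughly 1 before it is passed to the model.)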
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
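    # (Note: each denoising timestep alternates between a first-order stage at sigma_hat and a
    # second-order stage at the interpolated sigma, so every step costs two model evaluations --
    # the "2" in KDPM2 / DPM-Solver-2.)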
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 11 | 1 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowercase_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
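# (Each entry maps a requested number of image embeddings to the (rows, cols) grid used by the
# adaptive pooling layer below, e.g. 4 image tokens come from a 2x2 pooled feature map.)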
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__()
__a = torchvision.models.resnetaaa(pretrained=_a )
__a = list(model.children() )[:-2]
__a = nn.Sequential(*_a )
__a = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __UpperCAmelCase ( self , _a ):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
__a = self.pool(self.model(_a ) )
__a = torch.flatten(_a , start_dim=2 )
__a = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a ):
__a = [json.loads(_a ) for l in open(_a )]
__a = os.path.dirname(_a )
__a = tokenizer
__a = labels
__a = len(_a )
__a = max_seq_length
__a = transforms
def __len__( self ):
return len(self.data )
def __getitem__( self , _a ):
__a = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=_a ) )
__a , __a , __a = sentence[0], sentence[1:-1], sentence[-1]
__a = sentence[: self.max_seq_length]
__a = torch.zeros(self.n_classes )
__a = 1
__a = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
__a = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __UpperCAmelCase ( self ):
__a = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def collate_fn(batch) -> tuple:
    # Pad the variable-length token sequences in a batch and stack the image/label tensors.
    lens = [len(row['''sentence'''] ) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long )
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch, lens) ):
        text_tensor[i_batch, :length] = input_row['''sentence''']
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row['''image'''] for row in batch] )
    tgt_tensor = torch.stack([row['''label'''] for row in batch] )
    img_start_token = torch.stack([row['''image_start_token'''] for row in batch] )
    img_end_token = torch.stack([row['''image_end_token'''] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels() -> list:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # Element-wise ReLU activation: negative entries are clamped to zero.
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
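    # Another illustrative case: real-valued inputs are handled the same way.
    print(np.array(relu([-2.5, 3.0, -0.1])))  # --> [0. 3. 0.]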
| 11 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    # Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] back in place.
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    # Bottom-up (iterative) merge sort: merge runs of size 1, 2, 4, ... until the list is sorted.
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
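# Minimal sanity checks (illustrative): the bottom-up passes above fully sort short lists.
assert iter_merge_sort([5, 1, 4, 2]) == [1, 2, 4, 5]
assert iter_merge_sort([]) == []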
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 11 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
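# (The helper above snaps the width/height down to multiples of 32, adds a batch dimension, and
# maps pixel values from [0, 255] to the [-1, 1] range the upscaler UNet expects.)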
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 11 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 1 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 |
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Approximate sin(x) with its Maclaurin series: x - x^3/3! + x^5/5! - ...
    # Simplify the angle to lie between -360 and 360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 1 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ComputeEnvironment.AMAZON_SAGEMAKER
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : List[Any] = 'ml.p3.2xlarge'
__UpperCAmelCase : List[str] = 'accelerate_sagemaker_execution_role'
__UpperCAmelCase : Optional[Any] = 'hf-sm'
__UpperCAmelCase : Optional[int] = 'us-east-1'
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : List[str] = 'accelerate-sagemaker-1'
__UpperCAmelCase : int = '1.6'
__UpperCAmelCase : List[Any] = '4.4'
__UpperCAmelCase : Any = 'train.py'
__UpperCAmelCase : Tuple = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
__UpperCAmelCase : Optional[Any] = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , _a )
assert isinstance(converted_args['''do_train'''] , _a )
assert isinstance(converted_args['''epochs'''] , _a )
assert isinstance(converted_args['''learning_rate'''] , _a )
assert isinstance(converted_args['''max_steps'''] , _a )
with pytest.raises(_a ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 11 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset dict into features and targets.
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1 )
    return predictions
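# (The reshape above returns the predictions as an (n_samples, 1) column vector; main() below then
# compares them against the held-out targets with MAE and MSE.)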
def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test, predictions)}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test, predictions)}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
lowercase_ = {
"facebook/nllb-large-en-ro": 1_0_2_4,
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = ['input_ids', 'attention_mask']
__UpperCAmelCase : Any = NllbTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , _a=False , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
__a = legacy_behaviour
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , legacy_behaviour=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''eng_Latn'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "eng_Latn" , _a = None , _a = "fra_Latn" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
else:
__a = [self.cur_lang_code]
__a = [self.eos_token_id]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
else:
__a = [self.cur_lang_code]
__a = [self.eos_token_id]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
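        # (num_hidden_layers is derived from the encoder block sizes, so it can only be changed via
        # `block_sizes`; the setter below raises to make that explicit.)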
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 11 | 1 |
"""simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(b'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data)
        print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
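# (Illustrative note: this client assumes a matching sender is already listening on the same
# host/port and streams the file back in chunks until it closes the connection.)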
if __name__ == "__main__":
main()
| 11 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> str:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = CTRLTokenizer
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
__a = dict(zip(_a , range(len(_a ) ) ) )
__a = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
__a = {'''unk_token''': '''<unk>'''}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCAmelCase ( self , **_a ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCAmelCase ( self , _a ):
__a = '''adapt react readapt apt'''
__a = '''adapt react readapt apt'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a = '''adapt react readapt apt'''
__a = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
__a = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokens + [tokenizer.unk_token]
__a = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
| 11 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Dict=None ) -> Tuple:
__a = None
if token is not None:
__a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__a = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__a = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json()
__a = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__a = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCAmelCase__ ):
__a = requests.get(url + f'''&page={i + 2}''' , headers=lowerCAmelCase__ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]=None ) -> Any:
__a = None
if token is not None:
__a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__a = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
__a = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json()
__a = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
__a = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCAmelCase__ ):
__a = requests.get(url + f'''&page={i + 2}''' , headers=lowerCAmelCase__ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
__a = None
if token is not None:
__a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__a = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ )
__a = result.headers['''Location''']
__a = requests.get(lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ )
__a = os.path.join(lowerCAmelCase__ , f'''{artifact_name}.zip''' )
with open(lowerCAmelCase__ , '''wb''' ) as fp:
fp.write(response.content )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None ) -> Optional[Any]:
__a = []
__a = []
__a = None
with zipfile.ZipFile(lowerCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowerCAmelCase__ ) as f:
for line in f:
__a = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__a = line[: line.index(''': ''' )]
__a = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
__a = line[len('''FAILED ''' ) :]
failed_tests.append(lowerCAmelCase__ )
elif filename == "job_name.txt":
__a = line
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCAmelCase__ )} for `errors` '''
f'''and {len(lowerCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
''' problem.''' )
__a = None
if job_name and job_links:
__a = job_links.get(lowerCAmelCase__ , lowerCAmelCase__ )
# A list with elements of the form (line of error, error, failed test)
__a = [x + [y] + [job_link] for x, y in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
return result
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int]=None ) -> Any:
__a = []
__a = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for p in os.listdir(lowerCAmelCase__ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowerCAmelCase__ , job_links=lowerCAmelCase__ ) )
return errors
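# Group the collected failures by error message, keeping a count and the list of failed tests (with job links) per error.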
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=None ) -> List[str]:
__a = Counter()
counter.update([x[1] for x in logs] )
__a = counter.most_common()
__a = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__a = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
__a = dict(sorted(r.items() , key=lambda lowerCAmelCase__ : item[1]["count"] , reverse=lowerCAmelCase__ ) )
return r
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> int:
__a = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
__a = test.split('''/''' )[2]
else:
__a = None
return test
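# Group the failures by the model whose tests produced them, counting how often each error occurs per model.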
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None ) -> str:
__a = [(x[0], x[1], get_model(x[2] )) for x in logs]
__a = [x for x in logs if x[2] is not None]
__a = {x[2] for x in logs}
__a = {}
for test in tests:
__a = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__a = counter.most_common()
__a = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__a = sum(error_counts.values() )
if n_errors > 0:
__a = {'''count''': n_errors, '''errors''': error_counts}
__a = dict(sorted(r.items() , key=lambda lowerCAmelCase__ : item[1]["count"] , reverse=lowerCAmelCase__ ) )
return r
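# Render the per-error summary as a GitHub-flavored Markdown table.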
def lowercase ( lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
__a = '''| no. | error | status |'''
__a = '''|-:|:-|:-|'''
__a = [header, sep]
for error in reduced_by_error:
__a = reduced_by_error[error]['''count''']
__a = f'''| {count} | {error[:100]} | |'''
lines.append(lowerCAmelCase__ )
return "\n".join(lowerCAmelCase__ )
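# Render the per-model summary as a GitHub-flavored Markdown table.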
def lowercase ( lowerCAmelCase__ : int ) -> Optional[Any]:
__a = '''| model | no. of errors | major error | count |'''
__a = '''|-:|-:|-:|-:|'''
__a = [header, sep]
for model in reduced_by_model:
__a = reduced_by_model[model]['''count''']
__a , __a = list(reduced_by_model[model]['''errors'''].items() )[0]
__a = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(lowerCAmelCase__ )
return "\n".join(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
lowercase_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase_ = get_job_links(args.workflow_run_id, token=args.token)
lowercase_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase_ = k.find(" / ")
lowercase_ = k[index + len(" / ") :]
lowercase_ = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase_ = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase_ = reduce_by_error(errors)
lowercase_ = reduce_by_model(errors)
lowercase_ = make_github_table(reduced_by_error)
lowercase_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 11 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -x[0] )
]
return result
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : int
__UpperCAmelCase : Node | None = None
__UpperCAmelCase : Node | None = None
def lowercase ( ) -> Node | None:
__a = Node(1 )
__a = Node(2 )
__a = Node(3 )
__a = Node(4 )
__a = Node(5 )
return tree
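# Pre-order traversal: root, then left subtree, then right subtree.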
def lowercase ( lowerCAmelCase__ : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
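# Post-order traversal: left subtree, right subtree, then root.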
def lowercase ( lowerCAmelCase__ : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
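# In-order traversal: left subtree, root, then right subtree.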
def lowercase ( lowerCAmelCase__ : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
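# Height of the tree: number of nodes on the longest root-to-leaf path.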
def lowercase ( lowerCAmelCase__ : Node | None ) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
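# Breadth-first (level-order) traversal using a FIFO queue.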
def lowercase ( lowerCAmelCase__ : Node | None ) -> Sequence[Node | None]:
__a = []
if root is None:
return output
__a = deque([root] )
while process_queue:
__a = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
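# Values of the nodes at a given level, collected from left to right.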
def lowercase ( lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> Sequence[Node | None]:
__a = []
def populate_output(lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCAmelCase__ , lowerCAmelCase__ )
return output
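# Values of the nodes at a given level, collected from right to left.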
def lowercase ( lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> Sequence[Node | None]:
__a = []
def populate_output(lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCAmelCase__ , lowerCAmelCase__ )
return output
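# Zig-zag (spiral) traversal: alternate left-to-right and right-to-left at each level.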
def lowercase ( lowerCAmelCase__ : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
__a = []
__a = 0
__a = height(lowerCAmelCase__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCAmelCase__ , lowerCAmelCase__ ) )
__a = 1
else:
output.append(get_nodes_from_right_to_left(lowerCAmelCase__ , lowerCAmelCase__ ) )
__a = 0
return output
def lowercase ( ) -> None: # Main function for testing.
__a = make_tree()
print(f'''In-order Traversal: {inorder(lowerCAmelCase__ )}''' )
print(f'''Pre-order Traversal: {preorder(lowerCAmelCase__ )}''' )
print(f'''Post-order Traversal: {postorder(lowerCAmelCase__ )}''' , '''\n''' )
print(f'''Height of Tree: {height(lowerCAmelCase__ )}''' , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCAmelCase__ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCAmelCase__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(lowerCAmelCase__ , level=lowerCAmelCase__ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    lowercase()
| 11 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
| 11 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'deit'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = encoder_stride
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = version.parse('1.11' )
@property
def __UpperCAmelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __UpperCAmelCase ( self ):
return 1E-4
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a=0.2 , _a=0.2 ):
__a = bp_numa
__a = bp_numa
__a = bp_numa
__a = conva_get[:2]
__a = conva_get[2]
__a = size_pa
__a = rate_w
__a = rate_t
__a = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a = -2 * np.random.rand(self.conva[1] ) + 1
__a = -2 * np.random.rand(self.num_bpa ) + 1
__a = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCAmelCase ( self , _a ):
# save model dict with pickle
__a = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_a , '''wb''' ) as f:
pickle.dump(_a , _a )
print(f'''Model saved: {save_path}''' )
@classmethod
def __UpperCAmelCase ( cls , _a ):
# read saved model
with open(_a , '''rb''' ) as f:
__a = pickle.load(_a ) # noqa: S301
__a = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
__a = model_dic.get('''size_pooling1''' )
__a = model_dic.get('''num_bp1''' )
__a = model_dic.get('''num_bp2''' )
__a = model_dic.get('''num_bp3''' )
__a = model_dic.get('''rate_weight''' )
__a = model_dic.get('''rate_thre''' )
# create model instance
__a = CNN(_a , _a , _a , _a , _a , _a , _a )
# modify model parameter
__a = model_dic.get('''w_conv1''' )
__a = model_dic.get('''wkj''' )
__a = model_dic.get('''vji''' )
__a = model_dic.get('''thre_conv1''' )
__a = model_dic.get('''thre_bp2''' )
__a = model_dic.get('''thre_bp3''' )
return conv_ins
def __UpperCAmelCase ( self , _a ):
return 1 / (1 + np.exp(-1 * x ))
def __UpperCAmelCase ( self , _a ):
return round(_a , 3 )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
# convolution process
__a = convs[0]
__a = convs[1]
__a = np.shape(_a )[0]
# get the data slice of original image data, data_focus
__a = []
for i_focus in range(0 , size_data - size_conv + 1 , _a ):
for j_focus in range(0 , size_data - size_conv + 1 , _a ):
__a = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_a )
# calculate the feature map of every single kernel, and saved as list of matrix
__a = []
__a = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_a ):
__a = []
for i_focus in range(len(_a ) ):
__a = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_a ) )
__a = np.asmatrix(_a ).reshape(
_a , _a )
data_featuremap.append(_a )
# expanding the data slice to One dimenssion
__a = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_a ) )
__a = np.asarray(_a )
return focus_list, data_featuremap
def __UpperCAmelCase ( self , _a , _a , _a="average_pool" ):
# pooling process
__a = len(featuremaps[0] )
__a = int(size_map / size_pooling )
__a = []
for i_map in range(len(_a ) ):
__a = featuremaps[i_map]
__a = []
for i_focus in range(0 , _a , _a ):
for j_focus in range(0 , _a , _a ):
__a = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_a ) )
__a = np.asmatrix(_a ).reshape(_a , _a )
featuremap_pooled.append(_a )
return featuremap_pooled
def __UpperCAmelCase ( self , _a ):
# expanding three dimension data to one dimension list
__a = []
for i in range(len(_a ) ):
__a = np.shape(data[i] )
__a = data[i].reshape(1 , shapes[0] * shapes[1] )
__a = data_listed.getA().tolist()[0]
data_expanded.extend(_a )
__a = np.asarray(_a )
return data_expanded
def __UpperCAmelCase ( self , _a ):
# expanding matrix to one dimension list
__a = np.asarray(_a )
__a = np.shape(_a )
__a = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
__a = []
__a = 0
for i_map in range(_a ):
__a = np.ones((size_map, size_map) )
for i in range(0 , _a , _a ):
for j in range(0 , _a , _a ):
__a = pd_pool[
i_pool
]
__a = i_pool + 1
__a = np.multiply(
_a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_a )
return pd_all
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=bool ):
# model traning
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_a )) )
print((''' - - Shape: Teach_Data ''', np.shape(_a )) )
__a = 0
__a = []
__a = 10_000
while rp < n_repeat and mse >= error_accuracy:
__a = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(_a ) ):
# print('------------Learning Image: %d--------------'%p)
__a = np.asmatrix(datas_train[p] )
__a = np.asarray(datas_teach[p] )
__a , __a = self.convolute(
_a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a = self.pooling(_a , self.size_poolinga )
__a = np.shape(_a )
__a = self._expand(_a )
__a = data_bp_input
__a = np.dot(_a , self.vji.T ) - self.thre_bpa
__a = self.sig(_a )
__a = np.dot(_a , self.wkj.T ) - self.thre_bpa
__a = self.sig(_a )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__a = np.multiply(
(data_teach - bp_outa) , np.multiply(_a , (1 - bp_outa) ) )
__a = np.multiply(
np.dot(_a , self.wkj ) , np.multiply(_a , (1 - bp_outa) ) )
__a = np.dot(_a , self.vji )
__a = pd_i_all / (self.size_poolinga * self.size_poolinga)
__a = pd_conva_pooled.T.getA().tolist()
__a = self._calculate_gradient_from_pool(
_a , _a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__a = self._expand_mat(pd_conva_all[k_conv] )
__a = self.rate_weight * np.dot(_a , _a )
__a = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__a = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__a = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__a = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__a = self.thre_bpa - pd_k_all * self.rate_thre
__a = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__a = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__a = rp + 1
__a = error_count / patterns
all_mse.append(_a )
def draw_error():
__a = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_a , '''+-''' )
plt.plot(_a , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_a , alpha=0.5 )
plt.show()
print('''------------------Training Complished---------------------''' )
print((''' - - Training epoch: ''', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def __UpperCAmelCase ( self , _a ):
# model predict
__a = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_a )) )
for p in range(len(_a ) ):
__a = np.asmatrix(datas_test[p] )
__a , __a = self.convolute(
_a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a = self.pooling(_a , self.size_poolinga )
__a = self._expand(_a )
__a = data_bp_input
__a = bp_outa * self.vji.T - self.thre_bpa
__a = self.sig(_a )
__a = bp_outa * self.wkj.T - self.thre_bpa
__a = self.sig(_a )
produce_out.extend(bp_outa.getA().tolist() )
__a = [list(map(self.do_round , _a ) ) for each in produce_out]
return np.asarray(_a )
def __UpperCAmelCase ( self , _a ):
# return the data of image after convoluting process so we can check it out
__a = np.asmatrix(_a )
__a , __a = self.convolute(
_a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a = self.pooling(_a , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 11 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
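# A minimal usage sketch (illustrative only; it assumes the original class name
# `MBartTokenizerFast` and access to the checkpoint listed in the maps above):
#
#     tokenizer = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
#
# As set_src_lang_special_tokens above shows, the end-of-sentence token and the
# source language code are appended as suffix tokens to every encoded sequence.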
| 11 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
__UpperCAmelCase : Optional[List[bool]]
__UpperCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
lowercase_ = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
lowercase_ = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 1_2,
"Pm": 1_5,
"Em": 1_8,
"Zm": 2_1,
"Ym": 2_4,
}
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> float:
__a = from_type.lower().strip('''s''' )
__a = to_type.lower().strip('''s''' )
__a = UNIT_SYMBOL.get(lowerCAmelCase__ , lowerCAmelCase__ )
__a = UNIT_SYMBOL.get(lowerCAmelCase__ , lowerCAmelCase__ )
if from_sanitized not in METRIC_CONVERSION:
__a = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowerCAmelCase__ )}'''
)
raise ValueError(lowerCAmelCase__ )
if to_sanitized not in METRIC_CONVERSION:
__a = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowerCAmelCase__ )}'''
)
raise ValueError(lowerCAmelCase__ )
__a = METRIC_CONVERSION[from_sanitized]
__a = METRIC_CONVERSION[to_sanitized]
__a = 1
if from_exponent > to_exponent:
__a = from_exponent - to_exponent
else:
__a = -(to_exponent - from_exponent)
return value * pow(10 , lowerCAmelCase__ )
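# Worked example (assuming the original constant and function names that the
# obfuscated identifiers above stand for): converting 4 kilometres to megametres
# uses the exponents 3 (km) and 6 (Mm), so the factor is 10 ** (3 - 6) and the
# result is 4 * 10 ** -3 = 0.004.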
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None:
__a = len(lowerCAmelCase__ )
    # If row is equal to the size of the board, it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowerCAmelCase__ ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45°: y - x = b or row - col = b
        # 135°: y + x = b or row + col = b
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : int ) -> None:
__a = []
depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowerCAmelCase__ )
print('''''' )
print(len(lowerCAmelCase__ ) , '''solutions were found.''' )
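# Worked example: for n = 4 the search finds exactly 2 solutions, one of them built
# from possible_board [1, 3, 0, 2] and printed as
# ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']; for n = 8 there are 92 solutions.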
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 11 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
lowercase_ = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
lowercase_ = {"vinai/bartpho-syllable": 1_0_2_4}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a = None , **_a , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__a = vocab_file
__a = monolingual_vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__a = {}
__a = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a ) not in self.fairseq_tokens_to_ids:
__a = cnt
cnt += 1
with open(_a , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
__a = line.strip().split()[0]
__a = len(self.fairseq_tokens_to_ids )
if str(_a ) not in self.fairseq_tokens_to_ids:
__a = len(self.fairseq_tokens_to_ids )
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
__a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _a ):
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self ):
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self ):
__a = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self , _a ):
return self.sp_model.encode(_a , out_type=_a )
def __UpperCAmelCase ( self , _a ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self , _a ):
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self , _a ):
__a = ''''''.join(_a ).replace(_a , ''' ''' ).strip()
return out_string
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(_a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_a , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(_a )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 11 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Any:
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
__a = (boundary[1] - boundary[0]) / steps
__a = boundary[0]
__a = boundary[1]
__a = make_points(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a = 0.0
y += (h / 2.0) * f(lowerCAmelCase__ )
for i in x_i:
# print(i)
y += h * f(lowerCAmelCase__ )
y += (h / 2.0) * f(lowerCAmelCase__ )
return y
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any ) -> str:
__a = a + h
while x < (b - h):
yield x
__a = x + h
def lowercase ( lowerCAmelCase__ : Tuple ) -> int: # enter your function here
__a = (x - 0) * (x - 0)
return y
def lowercase ( ) -> Tuple:
__a = 0.0 # Lower bound of integration
__a = 1.0 # Upper bound of integration
__a = 10.0 # define number of steps or resolution
__a = [a, b] # define boundary of integration
__a = method_a(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'''y = {y}''' )
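# Sanity check for the defaults above: the composite trapezoidal rule applied to
# f(x) = x**2 on [0, 1] with 10 steps yields approximately 0.335, slightly above the
# exact integral 1/3, as expected when the rule is applied to a convex integrand.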
if __name__ == "__main__":
main()
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowercase_ = TypeVar("T")
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (position - 1) // 2
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (2 * position) + 1
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (2 * position) + 2
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
__a = []
__a = {}
__a = 0
def __len__( self ):
return self.elements
def __repr__( self ):
return str(self.heap )
def __UpperCAmelCase ( self ):
# Check if the priority queue is empty
return self.elements == 0
def __UpperCAmelCase ( self , _a , _a ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__a = self.elements
self.elements += 1
self._bubble_up(_a )
def __UpperCAmelCase ( self ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__a , __a = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__a , __a = self.heap[0]
self._bubble_down(_a )
return elem
def __UpperCAmelCase ( self , _a , _a ):
# Update the weight of the given key
__a = self.position_map[elem]
__a = (elem, weight)
if position > 0:
__a = get_parent_position(_a )
__a , __a = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_a )
else:
self._bubble_down(_a )
else:
self._bubble_down(_a )
def __UpperCAmelCase ( self , _a ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
__a = self.position_map[elem]
if curr_pos == 0:
return None
__a = get_parent_position(_a )
__a , __a = self.heap[curr_pos]
__a , __a = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_a , _a )
return self._bubble_up(_a )
return None
def __UpperCAmelCase ( self , _a ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
__a = self.position_map[elem]
__a , __a = self.heap[curr_pos]
__a = get_child_left_position(_a )
__a = get_child_right_position(_a )
if child_left_position < self.elements and child_right_position < self.elements:
__a , __a = self.heap[child_left_position]
__a , __a = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_a , _a )
return self._bubble_down(_a )
if child_left_position < self.elements:
__a , __a = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_a , _a )
return self._bubble_down(_a )
else:
return None
if child_right_position < self.elements:
__a , __a = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_a , _a )
return self._bubble_down(_a )
return None
def __UpperCAmelCase ( self , _a , _a ):
# Swap the nodes at the given positions
__a = self.heap[nodea_pos][0]
__a = self.heap[nodea_pos][0]
__a , __a = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__a = nodea_pos
__a = nodea_pos
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
__a = {}
__a = 0
def __repr__( self ):
return str(self.connections )
def __len__( self ):
return self.nodes
def __UpperCAmelCase ( self , _a ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__a = {}
self.nodes += 1
def __UpperCAmelCase ( self , _a , _a , _a ):
# Add an edge between 2 nodes in the graph
self.add_node(_a )
self.add_node(_a )
__a = weight
__a = weight
def lowercase ( lowerCAmelCase__ : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
__a = {node: maxsize for node in graph.connections}
__a = {node: None for node in graph.connections}
__a = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase__ , lowerCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__a = priority_queue.extract_min()
__a = 0
for neighbour in graph.connections[node]:
        if dist[neighbour] > graph.connections[node][neighbour]:
            __a = graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
__a = node
# running prim's algorithm
while not priority_queue.is_empty():
__a = priority_queue.extract_min()
for neighbour in graph.connections[node]:
            # Prim's key is the weight of the connecting edge, not a cumulative path length
            if dist[neighbour] > graph.connections[node][neighbour]:
                __a = graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
__a = node
return dist, parent
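# Conceptual example: on an undirected triangle graph with edge weights a-b = 1,
# b-c = 2 and a-c = 3, Prim's algorithm started from a keeps the edges a-b and b-c
# (total weight 3), i.e. the parent map attaches c through b rather than directly
# through the heavier a-c edge.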
| 11 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
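# A minimal usage sketch (illustrative only; it assumes the original class name
# `LayoutLMv3Processor` and an available checkpoint):
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(image, return_tensors="pt")  # `image` is a PIL image
#
# When the underlying image processor runs OCR (its default), words and boxes are
# produced automatically, so passing `boxes` or `word_labels` raises the errors
# checked in __call__ above.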
| 11 | 1 |
"""simple docstring"""
lowercase_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowercase_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowercase_ = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> str:
assert len(str(lowerCAmelCase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
__a = year // 100
__a = (5 * (century % 4) + 2) % 7
__a = year % 100
__a = centurian % 12
__a = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__a = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
__a = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
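# Worked example (using the constants defined above): for 1 January 2023 the year's
# doomsday anchor evaluates to 2 (Tuesday), January's doomsday date in a common year
# is the 3rd, and (2 + 1 - 3) % 7 == 0 maps to "Sunday", matching the real calendar.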
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 11 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *_a , **_a ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
| 11 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a=0.0 , _a = None , _a = "geglu" , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = "layer_norm" , _a = False , ):
super().__init__()
__a = only_cross_attention
__a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__a = AdaLayerNorm(_a , _a )
elif self.use_ada_layer_norm_zero:
__a = AdaLayerNormZero(_a , _a )
else:
__a = nn.LayerNorm(_a , elementwise_affine=_a )
__a = Attention(
query_dim=_a , heads=_a , dim_head=_a , dropout=_a , bias=_a , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_a , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__a = (
AdaLayerNorm(_a , _a )
if self.use_ada_layer_norm
else nn.LayerNorm(_a , elementwise_affine=_a )
)
__a = Attention(
query_dim=_a , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_a , dim_head=_a , dropout=_a , bias=_a , upcast_attention=_a , ) # is self-attn if encoder_hidden_states is none
else:
__a = None
__a = None
# 3. Feed-forward
__a = nn.LayerNorm(_a , elementwise_affine=_a )
__a = FeedForward(_a , dropout=_a , activation_fn=_a , final_dropout=_a )
# let chunk size default to None
__a = None
__a = 0
def __UpperCAmelCase ( self , _a , _a ):
# Sets chunk feed-forward
__a = chunk_size
__a = dim
def __UpperCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
__a = self.norma(_a , _a )
elif self.use_ada_layer_norm_zero:
__a , __a , __a , __a , __a = self.norma(
_a , _a , _a , hidden_dtype=hidden_states.dtype )
else:
__a = self.norma(_a )
__a = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__a = self.attna(
_a , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_a , **_a , )
if self.use_ada_layer_norm_zero:
__a = gate_msa.unsqueeze(1 ) * attn_output
__a = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__a = (
self.norma(_a , _a ) if self.use_ada_layer_norm else self.norma(_a )
)
__a = self.attna(
_a , encoder_hidden_states=_a , attention_mask=_a , **_a , )
__a = attn_output + hidden_states
# 3. Feed-forward
__a = self.norma(_a )
if self.use_ada_layer_norm_zero:
__a = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
__a = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__a = torch.cat(
[self.ff(_a ) for hid_slice in norm_hidden_states.chunk(_a , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__a = self.ff(_a )
if self.use_ada_layer_norm_zero:
__a = gate_mlp.unsqueeze(1 ) * ff_output
__a = ff_output + hidden_states
return hidden_states
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a = None , _a = 4 , _a = 0.0 , _a = "geglu" , _a = False , ):
super().__init__()
__a = int(dim * mult )
__a = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__a = GELU(_a , _a )
if activation_fn == "gelu-approximate":
__a = GELU(_a , _a , approximate='''tanh''' )
elif activation_fn == "geglu":
__a = GEGLU(_a , _a )
elif activation_fn == "geglu-approximate":
__a = ApproximateGELU(_a , _a )
__a = nn.ModuleList([] )
# project in
self.net.append(_a )
# project dropout
self.net.append(nn.Dropout(_a ) )
# project out
self.net.append(nn.Linear(_a , _a ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(_a ) )
def __UpperCAmelCase ( self , _a ):
for module in self.net:
__a = module(_a )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a = "none" ):
super().__init__()
__a = nn.Linear(_a , _a )
__a = approximate
def __UpperCAmelCase ( self , _a ):
if gate.device.type != "mps":
return F.gelu(_a , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __UpperCAmelCase ( self , _a ):
__a = self.proj(_a )
__a = self.gelu(_a )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a ):
super().__init__()
__a = nn.Linear(_a , dim_out * 2 )
def __UpperCAmelCase ( self , _a ):
if gate.device.type != "mps":
return F.gelu(_a )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __UpperCAmelCase ( self , _a ):
__a , __a = self.proj(_a ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_a )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a ):
super().__init__()
__a = nn.Linear(_a , _a )
def __UpperCAmelCase ( self , _a ):
__a = self.proj(_a )
return x * torch.sigmoid(1.702 * x )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a ):
super().__init__()
__a = nn.Embedding(_a , _a )
__a = nn.SiLU()
__a = nn.Linear(_a , embedding_dim * 2 )
__a = nn.LayerNorm(_a , elementwise_affine=_a )
def __UpperCAmelCase ( self , _a , _a ):
__a = self.linear(self.silu(self.emb(_a ) ) )
__a , __a = torch.chunk(_a , 2 )
__a = self.norm(_a ) * (1 + scale) + shift
return x
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a ):
super().__init__()
__a = CombinedTimestepLabelEmbeddings(_a , _a )
__a = nn.SiLU()
__a = nn.Linear(_a , 6 * embedding_dim , bias=_a )
__a = nn.LayerNorm(_a , elementwise_affine=_a , eps=1E-6 )
def __UpperCAmelCase ( self , _a , _a , _a , _a=None ):
__a = self.linear(self.silu(self.emb(_a , _a , hidden_dtype=_a ) ) )
__a , __a , __a , __a , __a , __a = emb.chunk(6 , dim=1 )
__a = self.norm(_a ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = None , _a = 1E-5 ):
super().__init__()
__a = num_groups
__a = eps
if act_fn is None:
__a = None
else:
__a = get_activation(_a )
__a = nn.Linear(_a , out_dim * 2 )
def __UpperCAmelCase ( self , _a , _a ):
if self.act:
__a = self.act(_a )
__a = self.linear(_a )
__a = emb[:, :, None, None]
__a , __a = emb.chunk(2 , dim=1 )
__a = F.group_norm(_a , self.num_groups , eps=self.eps )
__a = x * (1 + scale) + shift
return x
| 11 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
| 11 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
return int(input_a == input_a == 0 )
def lowercase ( ) -> None:
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(f'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(f'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(f'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 11 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
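# Hedged usage sketch (assumption, not part of the original file): this class mirrors
# diffusers' LDMSuperResolutionPipeline, so a typical call would look roughly like
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     upscaled = pipe(low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]
# where `low_res_pil_image` is a PIL image whose sides are rounded down to multiples of 32 by preprocess().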
| 11 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCAmelCase : ClassVar[Features] = Features({'audio': Audio()} )
__UpperCAmelCase : ClassVar[Features] = Features({'transcription': Value('string' )} )
__UpperCAmelCase : str = "audio"
__UpperCAmelCase : str = "transcription"
def __UpperCAmelCase ( self , _a ):
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , _a ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
__a = copy.deepcopy(self )
__a = self.input_schema.copy()
__a = features[self.audio_column]
__a = input_schema
return task_template
@property
def __UpperCAmelCase ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 11 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 1 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
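# Hedged examples (added, not part of the original file), following the Möbius definition above:
#     mobius(2) -> -1   (one prime factor, square-free)
#     mobius(6) ->  1   (two prime factors, 2 * 3, square-free)
#     mobius(4) ->  0   (2 * 2 is not square-free)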
| 11 |
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
    functions_shuffled = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
        features = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'''list''': (100,)}, )
print('''first set of iterations''' )
for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
print('''shuffling dataset''' )
        dataset = dataset.shuffle()
print('''Second set of iterations (after shuffling''' )
for func, kwargs in functions_shuffled:
            print('''shuffled ''', func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
        with open(RESULTS_FILE_PATH, '''wb''') as f:
            f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
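# Hedged note (added, not part of the original file): each timing lands in the `times`
# dict and is dumped as JSON to RESULTS_FILE_PATH (a "results/<script name>.json" file
# next to this script), so the benchmark can simply be run as a standalone module.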
| 11 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(test_features), 1)
    return predictions
def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test, predictions)}''')
    print(f'''Mean Square Error : {mean_squared_error(y_test, predictions)}''')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
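# Hedged usage note (added, not part of the original file): running this module fits an
# XGBRegressor on 75% of the California housing data and prints the held-out errors,
# roughly of the form
#     Mean Absolute Error : 0.30...
#     Mean Square Error : 0.20...
# (exact numbers depend on the installed xgboost/scikit-learn versions).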
| 11 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__SCREAMING_SNAKE_CASE ) , 'Tatoeba directory does not exist.' )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
__a = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_a )
@slow
def __UpperCAmelCase ( self ):
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __UpperCAmelCase ( self ):
__a , __a = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_a )
assert mmeta["long_pair"] == "heb-eng"
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
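# Hedged usage note (added, not part of the original file): with _import_structure registered
# above, the heavy torch/TF submodules are only imported on first attribute access, e.g.
#     from transformers import EfficientFormerConfig, EfficientFormerModel
# pulls in the modeling code lazily instead of at package import time.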
| 11 | 1 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 11 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
lowercase_ = "▁"
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __UpperCAmelCase ( self ):
return len(self.sp_model )
def __UpperCAmelCase ( self ):
__a = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , _a ):
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self , _a ):
if self.remove_space:
__a = ''' '''.join(inputs.strip().split() )
else:
__a = inputs
__a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__a = unicodedata.normalize('''NFKD''' , _a )
__a = ''''''.join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
__a = outputs.lower()
return outputs
def __UpperCAmelCase ( self , _a ):
__a = self.preprocess_text(_a )
__a = self.sp_model.encode(_a , out_type=_a )
__a = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__a = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__a = cur_pieces[1:]
else:
__a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def __UpperCAmelCase ( self , _a ):
return self.sp_model.PieceToId(_a )
def __UpperCAmelCase ( self , _a ):
return self.sp_model.IdToPiece(_a )
def __UpperCAmelCase ( self , _a ):
__a = []
__a = ''''''
__a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__a = True
__a = []
else:
current_sub_tokens.append(_a )
__a = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 11 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[int] = GPTaTokenizer
def __init__( self , _a=None , _a=None , _a=None , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|endoftext|>" , _a=False , **_a , ):
super().__init__(
_a , _a , tokenizer_file=_a , unk_token=_a , bos_token=_a , eos_token=_a , add_prefix_space=_a , **_a , )
__a = kwargs.pop('''add_bos_token''' , _a )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
__a = getattr(_a , pre_tok_state.pop('''type''' ) )
__a = add_prefix_space
__a = pre_tok_class(**_a )
__a = add_prefix_space
def __UpperCAmelCase ( self , *_a , **_a ):
__a = kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
__a = kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCAmelCase ( self , _a , _a = None ):
__a = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCAmelCase ( self , _a ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 11 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 | 1 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
    def __UpperCAmelCase ( self , model_outputs ):
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''')
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
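# Hedged usage sketch (assumption, not part of the original file): this mirrors
# transformers' zero-shot-audio-classification pipeline, so typical usage is roughly
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# which returns a list of {"score", "label"} dicts sorted by descending score.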
| 11 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=32 , _a=2 , _a=3 , _a=16 , _a=[1, 2, 1] , _a=[2, 2, 4] , _a=2 , _a=2.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=True , _a=0.02 , _a=1E-5 , _a=True , _a=None , _a=True , _a=10 , _a=8 , _a=["stage1", "stage2", "stage3"] , _a=[1, 2, 3] , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = depths
__a = num_heads
__a = window_size
__a = mlp_ratio
__a = qkv_bias
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = drop_path_rate
__a = hidden_act
__a = use_absolute_embeddings
__a = patch_norm
__a = layer_norm_eps
__a = initializer_range
__a = is_training
__a = scope
__a = use_labels
__a = type_sequence_label_size
__a = encoder_stride
__a = out_features
__a = out_indices
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = MaskFormerSwinModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
__a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__a = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = MaskFormerSwinBackbone(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_a ):
__a = ['''stem''']
__a = MaskFormerSwinBackbone(config=_a )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Optional[Any] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MaskFormerSwinModelTester(self )
__a = ConfigTester(self , config_class=_a , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ):
return
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
@unittest.skip('''Swin does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_a ) , _a )
# Swin has a different seq_length
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(_a , _a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(_a , _a , _a , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(_a , _a , _a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(_a , _a , _a , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_a ):
__a = 0
return t
def check_equivalence(_a , _a , _a , _a={} ):
with torch.no_grad():
__a = model(**_a , return_dict=_a , **_a )
__a = model(**_a , return_dict=_a , **_a ).to_tuple()
def recursive_check(_a , _a ):
if isinstance(_a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_a , _a ):
recursive_check(_a , _a )
elif isinstance(_a , _a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_a , _a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_a ) , set_nan_tensor_to_zero(_a ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_a ).any()} and `inf`: {torch.isinf(_a )}. Dict has'''
f''' `nan`: {torch.isnan(_a ).any()} and `inf`: {torch.isinf(_a )}.'''
) , )
recursive_check(_a , _a )
for model_class in self.all_model_classes:
__a = model_class(_a )
model.to(_a )
model.eval()
__a = self._prepare_for_class(_a , _a )
__a = self._prepare_for_class(_a , _a )
check_equivalence(_a , _a , _a )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
check_equivalence(_a , _a , _a )
__a = self._prepare_for_class(_a , _a )
__a = self._prepare_for_class(_a , _a )
check_equivalence(_a , _a , _a , {'''output_hidden_states''': True} )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
check_equivalence(_a , _a , _a , {'''output_hidden_states''': True} )
@require_torch
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__UpperCAmelCase : Union[str, Any] = MaskFormerSwinConfig
def __UpperCAmelCase ( self ):
__a = MaskFormerSwinModelTester(self )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
__a = backbone_class(_a )
backbone.to(_a )
backbone.eval()
__a = backbone(**_a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__a = backbone(**_a , output_hidden_states=_a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__a , __a , __a = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__a = backbone(**_a , output_attentions=_a )
self.assertIsNotNone(outputs.attentions )
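# Illustrative sketch (not part of the test suite above; shapes are made up): the
# backbone test asserts that `outputs.feature_maps` lines up with `backbone.channels`,
# i.e. feature map i carries channels[i] channels in dimension 1. Restated on plain
# tensors, assuming torch is installed:
#
#     import torch
#     feature_maps = [torch.zeros(2, 96, 56, 56), torch.zeros(2, 192, 28, 28)]
#     channels = [96, 192]
#     assert len(feature_maps) == len(channels)
#     assert all(fm.shape[1] == c for fm, c in zip(feature_maps, channels))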
| 11 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
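# Illustrative sketch (not used by the tests above): `check_results_dict_not_empty`
# walks per-model results shaped roughly like the made-up dict below, keyed first by
# batch size and then by sequence length.
_example_results = {"sshleifer/tiny-gpt2": {"bs": [1], "ss": [8], "result": {1: {8: 0.012}}}}
for _model_result in _example_results.values():
    for _bs, _ss in zip(_model_result["bs"], _model_result["ss"]):
        assert _model_result["result"][_bs][_ss] is not None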
| 11 | 1 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_0_0_0),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def lowercase ( value : float , from_type : str , to_type : str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
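# A couple of illustrative conversions using the table above:
#   4 cubicmeter -> litre  : 4 * 1 * 1000        == 4000
#   2 litre      -> gallon : 2 * 0.001 * 264.172 == 0.528344
assert abs(lowercase(4, "cubicmeter", "litre") - 4_000) < 1e-6
assert abs(lowercase(2, "litre", "gallon") - 0.528344) < 1e-6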
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
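# Usage sketch (illustrative; assumes the parent `transformers` package and torch
# are installed). Thanks to the lazy module above, the torch-backed classes are
# only imported on first attribute access:
#
#     from transformers import FocalNetConfig, FocalNetModel
#     config = FocalNetConfig()
#     model = FocalNetModel(config)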
| 11 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'data2vec-text'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , **_a , ):
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = classifier_dropout
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 11 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
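# Quick arithmetic cross-check (illustrative only) of the accumulation expected in
# the first test above: [1.0, 2.0] + [-2.0, 1.0] + [-1.0, 2.0] sums to [-2.0, 5.0].
assert [a + b + c for a, b, c in zip([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0])] == [-2.0, 5.0]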
| 11 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowercase_ = random.Random()
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int=1.0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[int]=None ) -> List[Any]:
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=400 , _a=2_000 , _a=24 , _a=24 , _a=0.0 , _a=16_000 , _a=True , _a=True , ):
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = feature_size
__a = num_mel_bins
__a = padding_value
__a = sampling_rate
__a = return_attention_mask
__a = do_normalize
def __UpperCAmelCase ( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self , _a=False , _a=False ):
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
__a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = SpeechaTextFeatureExtractor if is_speech_available() else None
def __UpperCAmelCase ( self ):
__a = SpeechaTextFeatureExtractionTester(self )
def __UpperCAmelCase ( self , _a ):
self.assertTrue(np.all(np.mean(_a , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a , axis=0 ) - 1 ) < 1E-3 ) )
def __UpperCAmelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__a = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
__a = feature_extractor(_a , padding=_a , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__a = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
__a = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test batched
__a = feature_extractor(_a , return_tensors='''np''' ).input_features
__a = feature_extractor(_a , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a = np.asarray(_a )
__a = feature_extractor(_a , return_tensors='''np''' ).input_features
__a = feature_extractor(_a , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
def __UpperCAmelCase ( self ):
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__a = ['''longest''', '''max_length''', '''do_not_pad''']
__a = [None, 16, None]
for max_length, padding in zip(_a , _a ):
__a = feature_extractor(
_a , padding=_a , max_length=_a , return_attention_mask=_a )
__a = inputs.input_features
__a = inputs.attention_mask
__a = [np.sum(_a ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCAmelCase ( self ):
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__a = ['''longest''', '''max_length''', '''do_not_pad''']
__a = [None, 16, None]
for max_length, padding in zip(_a , _a ):
__a = feature_extractor(
_a , max_length=_a , padding=_a , return_tensors='''np''' , return_attention_mask=_a )
__a = inputs.input_features
__a = inputs.attention_mask
__a = [np.sum(_a ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCAmelCase ( self ):
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__a = feature_extractor(
_a , padding='''max_length''' , max_length=4 , truncation=_a , return_tensors='''np''' , return_attention_mask=_a , )
__a = inputs.input_features
__a = inputs.attention_mask
__a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __UpperCAmelCase ( self ):
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__a = feature_extractor(
_a , padding='''longest''' , max_length=4 , truncation=_a , return_tensors='''np''' , return_attention_mask=_a , )
__a = inputs.input_features
__a = inputs.attention_mask
__a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then truncate to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__a = feature_extractor(
_a , padding='''longest''' , max_length=16 , truncation=_a , return_tensors='''np''' , return_attention_mask=_a , )
__a = inputs.input_features
__a = inputs.attention_mask
__a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad only up to the longest
self.assertEqual(input_features.shape , (3, 6, 24) )
def __UpperCAmelCase ( self ):
import torch
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = np.random.rand(100 , 32 ).astype(np.floataa )
__a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__a = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __UpperCAmelCase ( self , _a ):
from datasets import load_dataset
__a = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self ):
# fmt: off
__a = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__a = self._load_datasamples(1 )
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = feature_extractor(_a , return_tensors='''pt''' ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _a , atol=1E-4 ) )
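# Standalone illustration (made-up data, reusing the `numpy` import at the top of
# this file) of the zero-mean / unit-variance property that
# `_check_zero_mean_unit_variance` asserts above: normalising a (time, feature)
# matrix per feature dimension drives the column means to ~0 and the variances to ~1.
_x = np.random.rand(200, 24)
_x = (_x - _x.mean(axis=0)) / np.sqrt(_x.var(axis=0) + 1e-12)
assert np.all(np.abs(_x.mean(axis=0)) < 1e-3)
assert np.all(np.abs(_x.var(axis=0) - 1) < 1e-3)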
| 11 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
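# Usage sketch (illustrative only; uses the upstream class name `MBartTokenizerFast`
# and requires downloading the checkpoint):
#
#     tok = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # With the source-language special tokens set above, the encoded sequence ends
#     # with [</s>, en_XX] rather than starting with a language code.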
| 11 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase_ = get_tests_dir("fixtures")
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# A mock response for an HTTP head request to emulate server down
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
__a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This checks that we actually called the mocked head request
mock_head.assert_called()
def __UpperCAmelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
__a = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def __UpperCAmelCase ( self ):
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
__a = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
__a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_a )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = ViTImageProcessor.from_pretrained(_a )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_a , repo_id='''test-image-processor''' , push_to_hub=_a , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = ViTImageProcessor.from_pretrained(_a )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_a , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __UpperCAmelCase ( self ):
CustomImageProcessor.register_for_auto_class()
__a = CustomImageProcessor.from_pretrained(_a )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
__a = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=_a )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 11 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'time_series_transformer'
__UpperCAmelCase : Tuple = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 32 , _a = 32 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 64 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 100 , _a = 0.02 , _a=True , **_a , ):
# time series specific configuration
__a = prediction_length
__a = context_length or prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__a = cardinality
else:
__a = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(_a ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __UpperCAmelCase ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
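# Worked example (made-up numbers) of the sizing arithmetic above: the per-step
# feature count is sum(embedding_dimension) + num_dynamic_real_features +
# num_time_features + num_static_real_features + input_size * 2, and the encoder
# input dimension adds input_size * len(lags_sequence) on top of it.
_example_number_of_features = sum([2, 3]) + 1 + 4 + 0 + 1 * 2
_example_encoder_input_dim = 1 * len([1, 2, 3, 4, 5, 6, 7]) + _example_number_of_features
assert (_example_number_of_features, _example_encoder_input_dim) == (12, 19)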
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
| 11 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None:
__a = len(lowerCAmelCase__ )
    # If row is equal to the size of the board, there is already a queen in every row
    # of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowerCAmelCase__ ):
        # Apply what we learned previously. First, check that the current board
        # (possible_board) does not already contain this column value; if it did,
        # two queens would collide vertically. Then apply the two diagonal formulas:
        #
        # 45º:  y - x = b   or   row - col = b
        # 135º: y + x = b   or   row + col = b
        #
        # and verify that neither result already appears in its respective set
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks matches, there is a collision, so we continue to
        # the next column in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # No collision for this column: recurse with the updated state
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : int ) -> None:
__a = []
depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowerCAmelCase__ )
print('''''' )
print(len(lowerCAmelCase__ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
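# Independent cross-check (illustrative, not used by the functions above): the classic
# 4-queens puzzle has exactly 2 solutions; a brute force over column permutations with
# the same two diagonal rules finds the same count.
from itertools import permutations
assert (
    sum(
        1
        for perm in permutations(range(4))
        if len({r - c for r, c in enumerate(perm)}) == 4
        and len({r + c for r, c in enumerate(perm)}) == 4
    )
    == 2
)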
| 11 | 1 |
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
        # the input arrives as a comma-separated string, so split it into a list
__a = arr.split(''',''' )
def __UpperCAmelCase ( self ):
__a = [int(self.array[0] )] * len(self.array )
__a = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
__a = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
__a = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
lowercase_ = input("please input some numbers:")
lowercase_ = SubArray(whole_array)
lowercase_ = array.solve_sub_array()
print(("the results is:", re))
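# Standalone illustration (independent of the class above) of the same running-maximum
# (Kadane) recurrence: over [1, 2, -5, 3, 4] the best contiguous-subarray sum is 7,
# coming from the slice [3, 4].
_best_ending_here = 0
_best_so_far = float("-inf")
for _v in [1, 2, -5, 3, 4]:
    _best_ending_here = max(_best_ending_here + _v, _v)
    _best_so_far = max(_best_so_far, _best_ending_here)
assert _best_so_far == 7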
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 11 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def lowercase ( lowerCAmelCase__ : str = "https://www.worldometers.info/coronavirus" ) -> dict:
__a = BeautifulSoup(requests.get(lowerCAmelCase__ ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
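# Worked example of what the defaults above imply for masking: a 224x224 image with
# 16x16 patches yields (224 // 16) ** 2 == 196 patches, and with mask_ratio == 0.75
# the encoder sees roughly int(196 * (1 - 0.75)) == 49 visible patches (the exact
# keep rule lives in the modeling code; this is only the arithmetic).
assert (224 // 16) ** 2 == 196
assert int(196 * (1 - 0.75)) == 49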
| 11 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *_a , **_a ):
super().__init__(*_a , **_a )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __UpperCAmelCase ( self , _a=None , _a=None , _a=None ):
__a = {}
__a = {}
if prompt is not None:
__a = prompt
if generate_kwargs is not None:
__a = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__a = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
__a = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , _a , _a=None ):
__a = load_image(_a )
if prompt is not None:
if not isinstance(_a , _a ):
raise ValueError(
f'''Received an invalid text input, got - {type(_a )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
__a = self.model.config.model_type
if model_type == "git":
__a = self.image_processor(images=_a , return_tensors=self.framework )
__a = self.tokenizer(text=_a , add_special_tokens=_a ).input_ids
__a = [self.tokenizer.cls_token_id] + input_ids
__a = torch.tensor(_a ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
__a = self.image_processor(images=_a , header_text=_a , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__a = self.image_processor(images=_a , return_tensors=self.framework )
__a = self.tokenizer(_a , return_tensors=self.framework )
model_inputs.update(_a )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__a = self.image_processor(images=_a , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__a = None
return model_inputs
def __UpperCAmelCase ( self , _a , _a=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , _a )
and all(x is None for x in model_inputs['''input_ids'''] )
):
__a = None
if generate_kwargs is None:
__a = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__a = model_inputs.pop(self.model.main_input_name )
__a = self.model.generate(_a , **_a , **_a )
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = []
for output_ids in model_outputs:
__a = {
'''generated_text''': self.tokenizer.decode(
_a , skip_special_tokens=_a , )
}
records.append(_a )
return records
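# Usage sketch (illustrative addition, not part of the original module). The class above is
# normally reached through the high-level `pipeline` factory; the checkpoint below is only an
# assumption and any public image-captioning model can be substituted.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
    # Accepts a local path, a URL or a PIL.Image; returns a list like [{"generated_text": "..."}]
    print(captioner("path/to/image.png"))  # placeholder path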
| 11 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
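# Usage sketch (illustrative addition, not part of the original module). With apply_ocr enabled
# (the image processor default) only an image is passed; words, boxes and word labels are only
# supplied when OCR is disabled. The checkpoint name is an assumption.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    document = Image.open("path/to/document.png").convert("RGB")  # placeholder path
    encoding = processor(document, return_tensors="pt")
    print(sorted(encoding.keys()))  # attention_mask, bbox, input_ids, pixel_values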
| 11 | 1 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = model.generate(_a , max_new_tokens=10 , do_sample=_a )
__a = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__a = TextStreamer(_a )
model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a = cs.out[:-1]
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = model.generate(_a , max_new_tokens=10 , do_sample=_a )
__a = tokenizer.decode(greedy_ids[0] )
__a = TextIteratorStreamer(_a )
__a = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__a = Thread(target=model.generate , kwargs=_a )
thread.start()
__a = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = model.generate(_a , max_new_tokens=10 , do_sample=_a )
__a = greedy_ids[:, input_ids.shape[1] :]
__a = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__a = TextStreamer(_a , skip_prompt=_a )
model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a = cs.out[:-1]
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__a = AutoTokenizer.from_pretrained('''distilgpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_a )
__a = -1
__a = torch.ones((1, 5) , device=_a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__a = TextStreamer(_a , skip_special_tokens=_a )
model.generate(_a , max_new_tokens=1 , do_sample=_a , streamer=_a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__a = cs.out[:-1] # Remove the final "\n"
__a = tokenizer(_a , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = TextIteratorStreamer(_a , timeout=0.001 )
__a = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__a = Thread(target=model.generate , kwargs=_a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_a ):
__a = ''''''
for new_text in streamer:
streamer_text += new_text
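# Standalone usage sketch (illustrative addition, not part of the original test file). This is
# the pattern exercised by the tests above: generation runs in a background thread while the
# iterator streamer yields decoded text pieces as soon as they are available.
if __name__ == "__main__":
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tok("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tok)
    generation_kwargs = {**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    for piece in streamer:
        print(piece, end="", flush=True)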
| 11 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar ( num_diffusion_timesteps : int , max_beta : float = 0.9_99 , alpha_transform_type : str = "cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
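# Illustrative note (added): betas_for_alpha_bar(1000) returns a length-1000 tensor of betas,
# i.e. beta_t = min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta) with the cosine alpha_bar above;
# this is what the "squaredcos_cap_v2" branch of the scheduler below relies on.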
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 11 | 1 |
"""simple docstring"""
def lowercase ( input_str : str , use_pascal : bool = False ) -> str:
    if not isinstance(input_str , str ):
        msg = f'''Expected string as input, found {type(input_str )}'''
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
        raise ValueError(msg )
    words = input_str.split('''_''' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '''''' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
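    # Illustrative examples (added); expected outputs follow from the implementation above:
    print(lowercase("some_random_string"))        # someRandomString
    print(lowercase("some_random_string", True))  # SomeRandomString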
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
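# Illustrative note (added): with the defaults above, attention_hidden_size and intermediate_size
# are derived from hidden_size when left as None, e.g. hidden_size=1024 yields
# attention_hidden_size=1024 and intermediate_size=4 * 1024 = 4096.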
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=1_000 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
__a = range_bbox
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__a = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a = bbox[i, j, 3]
__a = bbox[i, j, 1]
__a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__a = bbox[i, j, 2]
__a = bbox[i, j, 0]
__a = t
__a = tf.convert_to_tensor(_a )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = TFLayoutLMModel(config=_a )
__a = model(_a , _a , attention_mask=_a , token_type_ids=_a )
__a = model(_a , _a , token_type_ids=_a )
__a = model(_a , _a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = TFLayoutLMForMaskedLM(config=_a )
__a = model(_a , _a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFLayoutLMForSequenceClassification(config=_a )
__a = model(_a , _a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFLayoutLMForTokenClassification(config=_a )
__a = model(_a , _a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = TFLayoutLMForQuestionAnswering(config=_a )
__a = model(_a , _a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Optional[Any] = 1_0
def __UpperCAmelCase ( self ):
__a = TFLayoutLMModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFLayoutLMModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def __UpperCAmelCase ( self ):
pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__a = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
__a = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__a = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
__a = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__a = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a )
# test the sequence output on [0, :3, :3]
__a = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _a , atol=1E-3 ) )
# test the pooled output on [1, :3]
__a = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _a , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self ):
# initialize model with randomly initialized sequence classification head
__a = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(
input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__a = outputs.loss
__a = (2,)
self.assertEqual(loss.shape , _a )
# test the shape of the logits
__a = outputs.logits
__a = (2, 2)
self.assertEqual(logits.shape , _a )
@slow
def __UpperCAmelCase ( self ):
# initialize model with randomly initialized token classification head
__a = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(
input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=_a )
# test the shape of the logits
__a = outputs.logits
__a = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _a )
@slow
def __UpperCAmelCase ( self ):
        # initialize model with randomly initialized question answering head
__a = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a )
# test the shape of the logits
__a = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _a )
self.assertEqual(outputs.end_logits.shape , _a )
| 11 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
| 11 | 1 |
"""simple docstring"""
def solution ( limit : int = 1000 ) -> int:
    return sum(e for e in range(3 , limit ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'''{solution() = }''')
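    # With the default limit of 1000 this prints "solution() = 233168"
    # (the sum of all multiples of 3 or 5 below 1000).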
| 11 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 2_55.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
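# Usage sketch (illustrative addition, not part of the original module). The checkpoint name and
# the exported pipeline class name are assumptions; any repository that bundles a VQModel, a
# UNet2DModel and a compatible scheduler in this layout should work the same way.
if __name__ == "__main__":
    from PIL import Image

    from diffusers import LDMSuperResolutionPipeline

    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = Image.open("path/to/low_res.png").convert("RGB")  # placeholder path
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")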
| 11 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
__a = []
def __UpperCAmelCase ( self , _a ):
return self.node_position[vertex]
def __UpperCAmelCase ( self , _a , _a ):
__a = pos
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__a = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__a = 2 * start + 1
else:
__a = 2 * start + 2
if heap[smallest_child] < heap[start]:
__a , __a = heap[smallest_child], positions[smallest_child]
__a , __a = (
heap[start],
positions[start],
)
__a , __a = temp, tempa
__a = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _a )
self.top_to_bottom(_a , _a , _a , _a )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = position[index]
while index != 0:
__a = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__a = heap[parent]
__a = position[parent]
self.set_position(position[parent] , _a )
else:
__a = val
__a = temp
self.set_position(_a , _a )
break
__a = parent
else:
__a = val
__a = temp
self.set_position(_a , 0 )
def __UpperCAmelCase ( self , _a , _a ):
__a = len(_a ) // 2 - 1
for i in range(_a , -1 , -1 ):
self.top_to_bottom(_a , _a , len(_a ) , _a )
def __UpperCAmelCase ( self , _a , _a ):
__a = positions[0]
__a = sys.maxsize
self.top_to_bottom(_a , 0 , len(_a ) , _a )
return temp
def prisms_algorithm ( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list ) # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
lowercase_ = int(input("Enter number of edges: ").strip())
lowercase_ = defaultdict(list)
for _ in range(edges_number):
lowercase_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 11 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 11 |
"""simple docstring"""
from math import factorial, radians
def sin ( angle_in_degrees : float , accuracy : int = 18 , rounded_values_count : int = 10 ) -> float:
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b # One positive term and the next will be negative and so on...
        a += 2 # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'roformer'
def __init__( self , _a=50_000 , _a=None , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_536 , _a=2 , _a=0.02 , _a=1E-12 , _a=0 , _a=False , _a=True , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size if embedding_size is None else embedding_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = rotary_value
__a = use_cache
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
__a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 11 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling ( data : dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost ( features : np.ndarray , target : np.ndarray , test_features : np.ndarray ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main ( ) -> None:
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 1 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = None , _a = None , _a = None , _a = False , _a = False , _a = None , **_a , ):
super().__init__(
_a , split=_a , features=_a , cache_dir=_a , keep_in_memory=_a , streaming=_a , num_proc=_a , **_a , )
__a = path_or_paths if isinstance(_a , _a ) else {self.split: path_or_paths}
__a = Text(
cache_dir=_a , data_files=_a , features=_a , **_a , )
def __UpperCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
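# Usage sketch (illustrative addition, not part of the original module). End users normally reach
# this reader through `datasets.load_dataset`; the file name below is a placeholder.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("text", data_files={"train": "path/to/corpus.txt"})
    print(dataset["train"][0])  # {"text": "<first line of the file>"}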
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowercase_ = get_logger(__name__)
lowercase_ = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class __lowerCAmelCase :
'''simple docstring'''
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
'''simple docstring'''
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@add_start_docstrings(_a )
def __call__( self , _a , _a , _a , **_a ):
for processor in self:
__a = inspect.signature(processor.__call__ ).parameters
if len(_a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
__a = processor(_a , _a , _a , **_a )
else:
__a = processor(_a , _a , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
if not isinstance(_a , _a ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
__a = temperature
def __call__( self , _a , _a , _a ):
__a = scores / self.temperature
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = -float('''Inf''' ) , _a = 1 ):
if not isinstance(_a , _a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_a , _a ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
__a = top_p
__a = filter_value
__a = min_tokens_to_keep
def __call__( self , _a , _a , _a ):
__a , __a = lax.top_k(_a , scores.shape[-1] )
__a = jnp.full_like(_a , self.filter_value )
__a = jax.nn.softmax(_a , axis=-1 ).cumsum(axis=-1 )
__a = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__a = jnp.roll(_a , 1 )
score_mask |= score_mask.at[:, 0].set(_a )
# min tokens to keep
__a = score_mask.at[:, : self.min_tokens_to_keep].set(_a )
__a = jnp.where(_a , _a , _a )
__a = jax.lax.sort_key_val(_a , _a )[-1]
return next_scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = -float('''Inf''' ) , _a = 1 ):
if not isinstance(_a , _a ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
__a = max(_a , _a )
__a = filter_value
def __call__( self , _a , _a , _a ):
__a , __a = scores.shape
__a = jnp.full(batch_size * vocab_size , self.filter_value )
__a = min(self.top_k , scores.shape[-1] ) # Safety check
__a , __a = lax.top_k(_a , _a )
__a = jnp.broadcast_to((jnp.arange(_a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__a = topk_scores.flatten()
__a = topk_indices.flatten() + shift
__a = next_scores_flat.at[topk_indices_flat].set(_a )
__a = next_scores_flat.reshape(_a , _a )
return next_scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
__a = bos_token_id
def __call__( self , _a , _a , _a ):
__a = jnp.full(scores.shape , -float('''inf''' ) )
__a = 1 - jnp.bool_(cur_len - 1 )
__a = jnp.where(_a , new_scores.at[:, self.bos_token_id].set(0 ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a ):
__a = max_length
__a = eos_token_id
def __call__( self , _a , _a , _a ):
__a = jnp.full(scores.shape , -float('''inf''' ) )
__a = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__a = jnp.where(_a , new_scores.at[:, self.eos_token_id].set(0 ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a ):
if not isinstance(_a , _a ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_a , _a ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
__a = min_length
__a = eos_token_id
def __call__( self , _a , _a , _a ):
# create boolean flag to decide if min length penalty should be applied
__a = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__a = jnp.where(_a , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a ):
__a = list(_a )
__a = begin_index
def __call__( self , _a , _a , _a ):
__a = 1 - jnp.bool_(cur_len - self.begin_index )
__a = jnp.where(_a , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
__a = list(_a )
def __call__( self , _a , _a , _a ):
__a = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
__a = dict(_a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__a = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__a = force_token_array.at[index].set(_a )
__a = jnp.intaa(_a )
def __call__( self , _a , _a , _a ):
def _force_token(_a ):
__a = scores.shape[0]
__a = self.force_token_array[generation_idx]
__a = jnp.ones_like(_a , dtype=scores.dtype ) * -float('''inf''' )
__a = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__a = lax.dynamic_update_slice(_a , _a , (0, current_token) )
return new_scores
__a = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_a ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a ):
__a = generate_config.eos_token_id
__a = generate_config.no_timestamps_token_id
__a = generate_config.no_timestamps_token_id + 1
__a = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_a , '''max_initial_timestamp_index''' ):
__a = generate_config.max_initial_timestamp_index
else:
__a = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__a = model_config.vocab_size
def __call__( self , _a , _a , _a ):
# suppress <|notimestamps|> which is handled by without_timestamps
__a = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
def handle_pairs(_a , _a ):
__a = jnp.where((cur_len - self.begin_index) >= 1 , _a , _a )
__a = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _a , )
__a = jnp.where((cur_len - self.begin_index) < 2 , _a , _a )
__a = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _a , _a , )
return jnp.where(
_a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , _a , )
__a = jax.vmap(_a )(_a , _a )
__a = jnp.where(cur_len == self.begin_index , _a , _a )
__a = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _a , )
__a = self.timestamp_begin + self.max_initial_timestamp_index
__a = jnp.where(
_a , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , _a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__a = jax.nn.log_softmax(_a , axis=-1 )
def handle_cumulative_probs(_a , _a ):
__a = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__a = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , _a , )
__a = jax.vmap(_a )(_a , _a )
return scores
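A minimal usage sketch for the processors above, assuming the public transformers names (FlaxLogitsProcessorList and the Flax warpers) for the obfuscated classes; not part of the original file:

import jax.numpy as jnp
from transformers import (  # assumed public names for the classes defined above
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

batch_size, vocab_size, cur_len = 2, 50, 5
input_ids = jnp.zeros((batch_size, cur_len), dtype=jnp.int32)
scores = jnp.log(jnp.ones((batch_size, vocab_size)) / vocab_size)

# Chain temperature scaling, top-k and top-p filtering exactly as the list class applies them.
processors = FlaxLogitsProcessorList(
    [
        FlaxTemperatureLogitsWarper(temperature=0.7),
        FlaxTopKLogitsWarper(top_k=10),
        FlaxTopPLogitsWarper(top_p=0.9),
    ]
)
warped = processors(input_ids, scores, cur_len)  # same shape as `scores`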
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
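A small sketch of the derived properties above, assuming the public transformers name FunnelConfig for this class; not part of the original file:

from transformers import FunnelConfig  # assumed public name for the config class above

config = FunnelConfig(block_sizes=[2, 2, 2], block_repeats=[1, 2, 2])
print(config.num_hidden_layers)  # 6 -> sum(block_sizes)
print(config.num_blocks)         # 3 -> len(block_sizes)
# Assigning to num_hidden_layers or num_blocks raises NotImplementedError; set block_sizes instead.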
| 11 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
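A hedged usage sketch for loading this pipeline through the diffusers community-pipeline mechanism; the custom_pipeline name and the checkpoint IDs below are assumptions, not taken from this file:

from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

# Assumed CLIPSeg checkpoint for the segmentation model/processor expected by __init__ above.
clipseg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
clipseg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",  # assumed inpainting checkpoint
    custom_pipeline="text_inpainting",       # assumed community-pipeline name for this class
    segmentation_model=clipseg_model,
    segmentation_processor=clipseg_processor,
)
# pipe(image=init_image, text="object to replace", prompt="what to paint instead")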
| 11 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> str:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
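A short inference sketch mirroring the integration tests above; the checkpoint ID comes from those tests, while the public class names (MobileViTV2ForImageClassification, MobileViTImageProcessor) are assumed for the obfuscated names in this file:

import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(-1).item())  # predicted ImageNet-1k class index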
| 11 | 1 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowercase_ = datasets.logging.get_logger(__name__)
lowercase_ = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
lowercase_ = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
lowercase_ = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
lowercase_ = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def __UpperCAmelCase ( self , _a ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
__a = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
__a = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__a = self.config_name.upper()
else:
raise KeyError(
f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
__a = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__a = score.BleurtScorer(os.path.join(_a , _a ) )
def __UpperCAmelCase ( self , _a , _a ):
__a = self.scorer.score(references=_a , candidates=_a )
return {"scores": scores}
| 11 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase_ = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'albert'
def __init__( self , _a=30_000 , _a=128 , _a=4_096 , _a=12 , _a=1 , _a=64 , _a=16_384 , _a=1 , _a="gelu_new" , _a=0 , _a=0 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=0.1 , _a="absolute" , _a=0 , _a=2 , _a=3 , **_a , ):
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
__a = vocab_size
__a = embedding_size
__a = hidden_size
__a = num_hidden_layers
__a = num_hidden_groups
__a = num_attention_heads
__a = inner_group_num
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = classifier_dropout_prob
__a = position_embedding_type
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
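A minimal sketch of the ONNX export configuration above, assuming the public transformers names AlbertConfig and AlbertOnnxConfig for the classes in this file:

from transformers import AlbertConfig
from transformers.models.albert.configuration_albert import AlbertOnnxConfig  # assumed public name

config = AlbertConfig()
onnx_config = AlbertOnnxConfig(config, task="sequence-classification")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])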
| 11 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -x[0] )
]
return result
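A hedged usage sketch; the task string follows the public transformers pipeline API and the checkpoint ID is an example, not taken from this file:

from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",  # example checkpoint, assumed
)
result = classifier(
    "sample.wav",  # local path, URL, or a 1-D numpy array at the feature extractor's sampling rate
    candidate_labels=["dog barking", "vacuum cleaner"],
    hypothesis_template="This is a sound of {}.",
)
print(result)  # [{"score": ..., "label": ...}, ...] sorted by descending score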
| 11 | 1 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> List[Any]:
# Load checkpoint
__a = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__a = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
__a = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__a = v
else:
__a = v
__a = chkpt['''params''']
__a = {n: v for n, v in config.items() if not isinstance(lowerCAmelCase__ , (torch.FloatTensor, numpy.ndarray) )}
__a = chkpt['''dico_word2id''']
__a = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
__a = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__a = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__a = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '''\n''' )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '''\n''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
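An example invocation of this conversion script; the module file name and both paths are placeholders:

# Assuming this module is saved as convert_xlm_checkpoint.py:
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#       --pytorch_dump_folder_path ./xlm-converted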
| 11 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
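Outside the test harness, the same benchmark utilities can be driven directly; a minimal sketch using the arguments exercised in the tests above (assumes the benchmark classes are available in the installed transformers version):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)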
| 11 | 1 |