| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
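The rows below are raw previews of the dataset's contents: each row pairs a `code` snippet and a `style_context` snippet with integer style ids and a binary `label`. As a quick orientation, here is a minimal sketch of reading a dataset with this schema through the `datasets` library; the repository id and split name are placeholders, not the actual dataset coordinates.

```python
# Minimal sketch; "user/code-style-pairs" and "train" are placeholder coordinates (assumptions).
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["label"])                  # int64, 0 or 1
print(row["code_codestyle"])         # int64 style id attached to `code`
print(row["code"][:200])             # the code snippet for this row
print(row["style_context"][:200])    # the reference snippet whose style it is compared against
```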
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16_384,
}
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
@lru_cache()
def bytes_to_unicode() -> Dict[int, str]:
    """Return the mapping from utf-8 bytes to printable unicode strings used by byte-level BPE."""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word: Tuple[str, ...]) -> set:
    """Return the set of adjacent symbol pairs in a word, represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
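# Illustrative behaviour of the two helpers above:
#   bytes_to_unicode() maps every byte 0-255 to a printable unicode character (e.g. the space byte 32 -> 'Ġ'),
#   so byte-level BPE can operate on strings free of whitespace/control characters.
#   get_pairs(('h', 'e', 'l', 'l', 'o')) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}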
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="replace" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=False , **_lowerCAmelCase , ):
_lowercase : Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
_lowercase : Dict = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
_lowercase : Optional[int] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
_lowercase : Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
_lowercase : List[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
_lowercase : Tuple = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowercase : Optional[int] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
_lowercase : Optional[Any] = json.load(_lowerCAmelCase )
_lowercase : Tuple = {v: k for k, v in self.encoder.items()}
_lowercase : Optional[int] = errors # how to handle errors in decoding
_lowercase : Dict = bytes_to_unicode()
_lowercase : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding='utf-8' ) as merges_handle:
_lowercase : Optional[Any] = merges_handle.read().split('\n' )[1:-1]
_lowercase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
_lowercase : Optional[int] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_lowercase : int = {}
_lowercase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowercase : Any = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __a ( self ):
return len(self.encoder )
def __a ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self , _lowerCAmelCase ):
if token in self.cache:
return self.cache[token]
_lowercase : str = tuple(_lowerCAmelCase )
_lowercase : Tuple = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
_lowercase : Optional[int] = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowercase , _lowercase : List[Any] = bigram
_lowercase : Any = []
_lowercase : Dict = 0
while i < len(_lowerCAmelCase ):
try:
_lowercase : List[Any] = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowercase : Any = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowercase : Tuple = tuple(_lowerCAmelCase )
_lowercase : Tuple = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
_lowercase : Optional[int] = get_pairs(_lowerCAmelCase )
_lowercase : Tuple = ' '.join(_lowerCAmelCase )
_lowercase : Optional[Any] = word
return word
def __a ( self , _lowerCAmelCase ):
_lowercase : Union[str, Any] = []
for token in re.findall(self.pat , _lowerCAmelCase ):
_lowercase : Any = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(' ' ) )
return bpe_tokens
def __a ( self , _lowerCAmelCase ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def __a ( self , _lowerCAmelCase ):
return self.decoder.get(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : Optional[int] = ''.join(_lowerCAmelCase )
_lowercase : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase : Union[str, Any] = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : Tuple = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + '\n' )
_lowercase : Dict = 0
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowercase : Union[str, Any] = token_index
writer.write(' '.join(_lowerCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : Dict = [self.cls_token_id]
_lowercase : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Dict = [self.sep_token_id]
_lowercase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , **_lowerCAmelCase ):
_lowercase : Any = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
_lowercase : Tuple = ' ' + text
return (text, kwargs)
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = PaddingStrategy.DO_NOT_PAD , _lowerCAmelCase = None , _lowerCAmelCase = None , ):
_lowercase : Any = super()._pad(
encoded_inputs=_lowerCAmelCase , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
_lowercase : List[str] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowercase : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowercase : List[Any] = len(encoded_inputs['global_attention_mask'] ) != len(_lowerCAmelCase )
if needs_to_be_padded:
_lowercase : Union[str, Any] = len(_lowerCAmelCase ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowercase : Dict = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
_lowercase : Any = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # key name reconstructed for the character-to-token ratio; the original column name may differ
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 677 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
class LongformerOnnxConfig(OnnxConfig):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = True
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
_lowercase : Optional[int] = super().outputs
if self.task == "default":
_lowercase : List[str] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
_lowercase : int = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowercase : str = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_lowercase : Any = 1
return inputs
| 677 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
parser.add_argument(
'--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' )
    args = parser.parse_args()
return args
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]:
_lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
_lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
_lowercase : Dict = 0
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
model.eval()
_lowercase : List[Any] = None
_lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) )
with torch.no_grad():
_lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.'
_lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_lowercase : str = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
SCREAMING_SNAKE_CASE , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=SCREAMING_SNAKE_CASE , )
logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) )
logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = ort_sess.run(
SCREAMING_SNAKE_CASE , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(SCREAMING_SNAKE_CASE ),
'max_length': np.array(SCREAMING_SNAKE_CASE ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def __magic_name__ ( ) -> Any:
_lowercase : Dict = parse_args()
_lowercase : Union[str, Any] = 5
_lowercase : Union[str, Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowercase : Optional[Any] = torch.device(args.device )
_lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(SCREAMING_SNAKE_CASE )
if args.max_length:
_lowercase : Any = args.max_length
if args.num_beams:
_lowercase : List[str] = args.num_beams
if args.output_file_path:
_lowercase : Union[str, Any] = args.output_file_path
else:
_lowercase : Tuple = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowercase : str = flax_key_tuple[:-1] + ('weight',)
_lowercase : Tuple = torch.permute(SCREAMING_SNAKE_CASE , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE ):
# linear layer
_lowercase : str = flax_key_tuple[:-1] + ('weight',)
_lowercase : Optional[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowercase : int = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
if "metadata" in layer:
_lowercase : List[Any] = layer.split('metadata' )
_lowercase : int = ''.join(split_layer[0] )[:-1]
_lowercase : List[str] = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
_lowercase : Union[str, Any] = layer.split('kvstore' )
_lowercase : List[Any] = ''.join(split_layer[0] )[:-1]
_lowercase : Tuple = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
_lowercase : int = layer.split('/' )
_lowercase : Dict = '/'.join(split_layer[:-1] )
_lowercase : Union[str, Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowercase : Dict = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
_lowercase : Dict = 'file'
else:
_lowercase : Union[str, Any] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : str = rename_keys(SCREAMING_SNAKE_CASE )
_lowercase : Dict = {}
for k, v in current_block.items():
_lowercase : str = v
_lowercase : Optional[Any] = new_current_block
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = WEIGHTS_NAME ) -> str:
_lowercase : Union[str, Any] = convert_file_size_to_int(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = []
_lowercase : int = {}
_lowercase : Optional[int] = 0
_lowercase : Dict = 0
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
_lowercase : List[str] = serialization.msgpack_restore(fp.read() )['optimizer']['target']
_lowercase : Union[str, Any] = flatten_dict(SCREAMING_SNAKE_CASE , sep='/' )
_lowercase : Any = {}
for layer in checkpoint_info.keys():
_lowercase , _lowercase , _lowercase : Optional[Any] = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if curr_real_layer_name in all_layers:
_lowercase : Any = content
else:
_lowercase : Union[str, Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowercase : int = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowercase : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE )
_lowercase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowercase , _lowercase : List[Any] = rename_base_flax_keys(tuple(key.split('/' ) ) , SCREAMING_SNAKE_CASE )
_lowercase : Tuple = '/'.join(SCREAMING_SNAKE_CASE )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowercase : int = os.path.join(
SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowercase : Union[str, Any] = {}
_lowercase : Optional[int] = 0
_lowercase : Tuple = raw_weights.to(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowercase : Tuple = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowercase : Any = {}
_lowercase : List[Any] = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : Union[str, Any] = weights_name.replace(
'.bin' , F"""-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE ):05d}.bin""" ) # len(sharded_state_dicts):05d}
_lowercase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_lowercase : Dict = shard
for key in shard:
_lowercase : Tuple = shard_file
# Add the metadata
_lowercase : Dict = {'total_size': total_size}
_lowercase : Optional[int] = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , 'w' , encoding='utf-8' ) as f:
_lowercase : Dict = json.dumps(SCREAMING_SNAKE_CASE , indent=2 , sort_keys=SCREAMING_SNAKE_CASE ) + '\n'
f.write(SCREAMING_SNAKE_CASE )
return metadata, index
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
args = parser.parse_args()
shard_on_the_fly(
    args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8')
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted', device_map='auto')
    tokenizer = T5Tokenizer.from_pretrained('t5-small')
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text, return_tensors='pt').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 677 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def __a ( self ):
_lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowercase : List[str] = model(_lowerCAmelCase )[0]
_lowercase : str = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : List[Any] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
| 677 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __a ( self ):
_lowercase : List[Any] = 1
_lowercase : Dict = 3
_lowercase : List[Any] = (3_2, 3_2)
_lowercase : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : List[str] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Dict = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def __a ( self ):
def extract(*_lowerCAmelCase , **_lowerCAmelCase ):
class lowerCAmelCase_ :
def __init__( self ):
_lowercase : Optional[int] = torch.ones([0] )
def __a ( self , _lowerCAmelCase ):
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def __a ( self ):
_lowercase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Union[str, Any] = self.dummy_cond_unet
_lowercase : Any = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
_lowercase : Any = self.dummy_vae
_lowercase : Dict = self.dummy_text_encoder
_lowercase : List[str] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
_lowercase : Any = 7_7
_lowercase : int = self.dummy_image.to(_lowerCAmelCase )
_lowercase : List[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : Tuple = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
_lowercase : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
_lowercase : int = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Any = 'A painting of a squirrel eating a burger'
_lowercase : List[Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowercase : str = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
_lowercase : int = output.images
_lowercase : List[Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowercase : Optional[Any] = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowercase : Optional[int] = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __a ( self ):
_lowercase : List[str] = self.dummy_cond_unet
_lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
_lowercase : Union[str, Any] = self.dummy_vae
_lowercase : str = self.dummy_text_encoder
_lowercase : int = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
_lowercase : str = 7_7
_lowercase : List[Any] = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
_lowercase : List[Any] = unet.half()
_lowercase : Any = vae.half()
_lowercase : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
_lowercase : str = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
_lowercase : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
_lowercase : Any = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Union[str, Any] = 'A painting of a squirrel eating a burger'
_lowercase : str = torch.manual_seed(0 )
_lowercase : Union[str, Any] = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __a ( self ):
_lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : List[Any] = init_image.resize((7_6_0, 5_0_4) )
_lowercase : List[str] = 'BAAI/AltDiffusion'
_lowercase : str = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Dict = 'A fantasy landscape, trending on artstation'
_lowercase : Union[str, Any] = torch.manual_seed(0 )
_lowercase : Optional[int] = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
_lowercase : Tuple = output.images[0]
_lowercase : Tuple = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
_lowercase : List[str] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_lowercase : List[str] = init_image.resize((7_6_8, 5_1_2) )
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
_lowercase : Tuple = 'BAAI/AltDiffusion'
_lowercase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : int = 'A fantasy landscape, trending on artstation'
_lowercase : Dict = torch.manual_seed(0 )
_lowercase : Optional[int] = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
_lowercase : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 677 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'''Total count for various states are: {counts}''')
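    # With both X gates applied there is no superposition, so every one of the 1_000 shots
    # measures '11' and the printed histogram is {'11': 1000}.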
| 677 | 1 |
| 677 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU.
    # This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer needed,
    # but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
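# Builds default masks for a BlenderbotSmall forward pass when they are not provided: padding positions are
# masked out and all attention heads are kept. Note that only the input/decoder attention masks are returned;
# the head masks are built but not included in the returned dict.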
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
_lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        _lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
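    # The two checks below verify incremental decoding: the decoder is run step-by-step with a cache created by
    # init_cache, and the final-step logits must match a single full-sequence decode to within 1e-3.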
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.int64 , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
        _lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.int64 )
        _lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.int64 )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
        _lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.int64 )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
        _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.float32 ).sum()
        _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
            # FlaxBlenderbotSmall models expect an eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=1_8 , _lowerCAmelCase=3_0 , _lowerCAmelCase=4_0_0 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=False , ):
_lowercase : str = size if size is not None else {'height': 2_0, 'width': 2_0}
_lowercase : str = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
_lowercase : Union[str, Any] = parent
_lowercase : Dict = batch_size
_lowercase : str = num_channels
_lowercase : int = image_size
_lowercase : Union[str, Any] = min_resolution
_lowercase : int = max_resolution
_lowercase : Optional[Any] = do_resize
_lowercase : List[str] = size
_lowercase : int = do_center_crop
_lowercase : Dict = crop_size
_lowercase : Optional[Any] = do_normalize
_lowercase : str = image_mean
_lowercase : Optional[int] = image_std
_lowercase : Union[str, Any] = do_reduce_labels
def __a ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
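# The two helpers below load samples from the hf-internal-testing/fixtures_ade20k dataset and return
# PIL image / segmentation-map pairs used by the semantic-segmentation tests further down.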
def __magic_name__ ( ) -> int:
_lowercase : Dict = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_lowercase : Tuple = Image.open(dataset[0]['file'] )
_lowercase : List[str] = Image.open(dataset[1]['file'] )
return image, map
def __magic_name__ ( ) -> Optional[Any]:
_lowercase : Dict = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_lowercase : Optional[int] = Image.open(ds[0]['file'] )
_lowercase : Union[str, Any] = Image.open(ds[1]['file'] )
_lowercase : List[str] = Image.open(ds[2]['file'] )
_lowercase : Optional[Any] = Image.open(ds[3]['file'] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = BeitImageProcessor if is_vision_available() else None
def __a ( self ):
_lowercase : Union[str, Any] = BeitImageProcessingTester(self )
@property
def __a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ):
_lowercase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
def __a ( self ):
_lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 2_0, 'width': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
_lowercase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=_lowerCAmelCase )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
def __a ( self ):
pass
def __a ( self ):
# Initialize image_processing
_lowercase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
_lowercase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowercase : Any = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __a ( self ):
# Initialize image_processing
_lowercase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
_lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowercase : Dict = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __a ( self ):
# Initialize image_processing
_lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowercase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowercase : List[str] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
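    # The next test feeds segmentation maps alongside the images and checks that they come back as long-typed
    # labels with values in [0, 255] and spatial size equal to the crop size, for tensor as well as PIL inputs.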
def __a ( self ):
# Initialize image_processing
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
_lowercase : Any = []
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_lowercase : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched
_lowercase : int = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
_lowercase , _lowercase : Optional[Any] = prepare_semantic_single_inputs()
_lowercase : Any = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched input (PIL images)
_lowercase , _lowercase : Tuple = prepare_semantic_batch_inputs()
_lowercase : Optional[Any] = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
def __a ( self ):
# Initialize image_processing
_lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_lowercase , _lowercase : int = prepare_semantic_single_inputs()
_lowercase : Dict = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 1_5_0 )
_lowercase : Optional[Any] = True
_lowercase : str = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
| 677 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
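# ONNX export configuration: Longformer additionally exposes `global_attention_mask` as a model input, and the
# dummy-input generator below starts from an all-zero global mask and marks every second token as global so the
# global-attention code path is exercised during export.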
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = True
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
_lowercase : Optional[int] = super().outputs
if self.task == "default":
_lowercase : List[str] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
_lowercase : int = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowercase : str = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_lowercase : Any = 1
return inputs
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
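# Lazy module setup: the import structure below maps each submodule to its public symbols; backend-specific
# classes (vision, torch, TF, Flax) are only registered when the corresponding dependency is available.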
UpperCamelCase = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ViTFeatureExtractor"]
UpperCamelCase = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 |
from __future__ import annotations
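# Returns True when all elements of the sequence are distinct, by comparing the length of the input with the
# length of the set built from it (the elements must therefore be hashable).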
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=9_9 , _lowerCAmelCase=0 , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase="last" , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
_lowercase : Optional[int] = parent
_lowercase : str = batch_size
_lowercase : Dict = seq_length
_lowercase : Optional[int] = is_training
_lowercase : Tuple = use_input_lengths
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : str = use_labels
_lowercase : Any = gelu_activation
_lowercase : int = sinusoidal_embeddings
_lowercase : int = causal
_lowercase : Tuple = asm
_lowercase : Optional[Any] = n_langs
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = n_special
_lowercase : Optional[Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : Optional[int] = initializer_range
_lowercase : Optional[int] = num_labels
_lowercase : List[Any] = num_choices
_lowercase : Optional[int] = summary_type
_lowercase : Optional[int] = use_proj
_lowercase : Union[str, Any] = scope
def __a ( self ):
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : List[Any] = None
if self.use_input_lengths:
_lowercase : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowercase : Optional[Any] = None
if self.use_token_type_ids:
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowercase : Tuple = None
_lowercase : Dict = None
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
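    # Each create_and_check_* helper below instantiates one Flaubert head, moves it to the test device, runs a
    # forward pass in eval mode, and asserts the loss/logit shapes expected for that task.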
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Union[str, Any] = FlaubertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Union[str, Any] = model(_lowerCAmelCase , lengths=_lowerCAmelCase , langs=_lowerCAmelCase )
_lowercase : Optional[int] = model(_lowerCAmelCase , langs=_lowerCAmelCase )
_lowercase : Tuple = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Any = FlaubertWithLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : List[str] = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Tuple = FlaubertForQuestionAnsweringSimple(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : str = model(_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Optional[Any] = FlaubertForQuestionAnswering(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Any = model(_lowerCAmelCase )
_lowercase : Tuple = model(
_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , p_mask=_lowerCAmelCase , )
_lowercase : Any = model(
_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , )
((_lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
_lowercase : List[Any] = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
((_lowercase) , ) : List[str] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Any = FlaubertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Optional[Any] = model(_lowerCAmelCase )
_lowercase : Dict = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : List[str] = self.num_labels
_lowercase : str = FlaubertForTokenClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : int = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Optional[int] = self.num_choices
_lowercase : List[str] = FlaubertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Any = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Optional[int] = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_lowercase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
_lowercase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def __a ( self ):
_lowercase : Any = FlaubertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , emb_dim=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = FlaubertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@slow
@require_torch_gpu
def __a ( self ):
_lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_lowercase : Union[str, Any] = True
_lowercase : Tuple = model_class(config=_lowerCAmelCase )
_lowercase : str = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = torch.jit.trace(
_lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'traced_model.pt' ) )
_lowercase : Any = torch.jit.load(os.path.join(_lowerCAmelCase , 'traced_model.pt' ) , map_location=_lowerCAmelCase )
loaded(inputs_dict['input_ids'].to(_lowerCAmelCase ) , inputs_dict['attention_mask'].to(_lowerCAmelCase ) )
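# Integration test: runs the pretrained flaubert_base_cased checkpoint on a fixed input and compares a 3x3
# slice of the last hidden state against hard-coded reference values (atol=1e-4).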
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : int = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
_lowercase : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
_lowercase : Union[str, Any] = model(_lowerCAmelCase )[0]
_lowercase : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : Optional[int] = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 677 |
import math
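# Introsort: quicksort with a median-of-three pivot, falling back to heap sort when the recursion depth budget
# is exhausted and to insertion sort for slices at or below a small size threshold.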
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list:
_lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Dict = i
_lowercase : str = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowercase : Optional[Any] = array[temp_index - 1]
temp_index -= 1
_lowercase : Optional[Any] = temp_index_value
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap
_lowercase : List[str] = index
_lowercase : List[str] = 2 * index + 1 # Left Node
_lowercase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowercase : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowercase : str = right_index
if largest != index:
        array[index], array[largest] = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Optional[Any] = low
_lowercase : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
        array[i], array[j] = array[j], array[i]
i += 1
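# Entry point: the recursion depth budget is 2 * ceil(log2(n)) and the insertion-sort threshold is 16; the
# recursive helper that follows switches to heap sort once max_depth reaches zero.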
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
    _lowercase : List[str] = 2 * math.ceil(math.log2(len(SCREAMING_SNAKE_CASE ) ) )
_lowercase : str = 16
return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE )
max_depth -= 1
        _lowercase : int = median_of_3(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = p
return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 677 | 1 |
from __future__ import annotations
UpperCamelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
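# Three implementations of the "next greater element" problem over the sample array above: two O(n^2) scans
# (index-based and slice-based) and an O(n) version using a monotonic stack.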
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[float]:
_lowercase : Tuple = []
_lowercase : Optional[int] = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
_lowercase : float = -1
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if arr[i] < arr[j]:
_lowercase : Dict = arr[j]
break
result.append(SCREAMING_SNAKE_CASE )
return result
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[float]:
_lowercase : str = []
for i, outer in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_lowercase : Optional[int] = inner
break
result.append(SCREAMING_SNAKE_CASE )
return result
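# Stack-based variant: traversing from the right, smaller or equal elements are popped so the stack top is
# always the nearest greater element to the right; positions with no greater element keep the default -1.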
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[float]:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
_lowercase : list[float] = []
_lowercase : list[float] = [-1] * arr_size
for index in reversed(range(SCREAMING_SNAKE_CASE ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_lowercase : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
UpperCamelCase = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Tuple = "beit"
def __init__( self , _lowerCAmelCase=8_1_9_2 , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase=3_0_7_2 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=2_2_4 , _lowerCAmelCase=1_6 , _lowerCAmelCase=3 , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=True , _lowerCAmelCase=[3, 5, 7, 1_1] , _lowerCAmelCase=[1, 2, 3, 6] , _lowerCAmelCase=True , _lowerCAmelCase=0.4 , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=2_5_5 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
_lowercase : Any = vocab_size
_lowercase : int = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Any = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : List[Any] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : Any = image_size
_lowercase : List[str] = patch_size
_lowercase : Tuple = num_channels
_lowercase : Optional[Any] = use_mask_token
_lowercase : List[Any] = use_absolute_position_embeddings
_lowercase : Tuple = use_relative_position_bias
_lowercase : Optional[Any] = use_shared_relative_position_bias
_lowercase : Union[str, Any] = layer_scale_init_value
_lowercase : Union[str, Any] = drop_path_rate
_lowercase : Optional[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowercase : List[Any] = out_indices
_lowercase : Dict = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowercase : Optional[int] = use_auxiliary_head
_lowercase : Tuple = auxiliary_loss_weight
_lowercase : Optional[int] = auxiliary_channels
_lowercase : Union[str, Any] = auxiliary_num_convs
_lowercase : Any = auxiliary_concat_input
_lowercase : str = semantic_loss_ignore_index
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Tuple = version.parse("1.11" )
@property
def __a ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __a ( self ):
return 1E-4
| 677 |
from collections.abc import Sequence
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
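# Direct evaluation: sum c_i * x**i over the coefficients, where the index i is the power of x.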
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
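# Horner's scheme: fold the coefficients from the highest power down, one multiply and one add per coefficient.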
_lowercase : Optional[Any] = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = result * x + coeff
return result
if __name__ == "__main__":
UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 677 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=4 , ):
_lowercase : Optional[int] = parent
_lowercase : Any = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Tuple = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : Optional[int] = use_token_type_ids
_lowercase : int = use_labels
_lowercase : Union[str, Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : str = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : str = hidden_dropout_prob
_lowercase : int = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : List[Any] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : List[str] = num_choices
def __a ( self ):
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Union[str, Any] = None
if self.use_attention_mask:
_lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : str = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_lowerCAmelCase , )
return config, input_ids, attention_mask
def __a ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : List[str] = config_and_inputs
_lowercase : str = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : int = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __a ( self ):
_lowercase : List[Any] = FlaxDistilBertModelTester(self )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Union[str, Any] = model_class_name.from_pretrained('distilbert-base-uncased' )
_lowercase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
_lowercase : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowercase : Any = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
_lowercase : Optional[int] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : Dict = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) )
| 677 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=None ):
_lowercase : int = data
_lowercase : Union[str, Any] = None
def __repr__( self ):
_lowercase : Dict = []
_lowercase : Tuple = self
while temp:
string_rep.append(F"""{temp.data}""" )
_lowercase : Optional[Any] = temp.next
return "->".join(_lowerCAmelCase )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
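# Build a singly linked list from the given sequence and return its head node.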
if not elements_list:
raise Exception('The Elements List is empty' )
_lowercase : Union[str, Any] = Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
_lowercase : Optional[int] = Node(elements_list[i] )
_lowercase : List[Any] = current.next
return head
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
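# Recurse to the tail first, then print each node's data on the way back, i.e. in reverse order.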
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def __magic_name__ ( ) -> List[str]:
from doctest import testmod
testmod()
_lowercase : int = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(SCREAMING_SNAKE_CASE )
print('Elements in Reverse:' )
print_reverse(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 1 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
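# Proth numbers have the form k * 2**n + 1 with k odd and k < 2**n (3, 5, 9, 13, 17, 25, ...);
# they are generated block by block until the requested index is reached.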
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
_lowercase : Tuple = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
elif number == 1:
return 3
elif number == 2:
return 5
else:
_lowercase : List[Any] = int(math.log(number // 3 , 2 ) ) + 2
_lowercase : Any = [3, 5]
_lowercase : Optional[Any] = 2
_lowercase : Tuple = 3
for block in range(1 , SCREAMING_SNAKE_CASE ):
for _ in range(SCREAMING_SNAKE_CASE ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
UpperCamelCase = 0
try:
UpperCamelCase = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 677 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
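# Euclidean distance with NumPy: square the element-wise differences, sum them and take the square root.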
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
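# Pure-Python variant: sum the squared pairwise differences and raise the total to the power 1/2.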
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def __magic_name__ ( ) -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
| 677 | 1 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCamelCase = threading.Lock()
UpperCamelCase = None
UpperCamelCase = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
UpperCamelCase = logging.WARNING
UpperCamelCase = True
def __magic_name__ ( ) -> Dict:
_lowercase : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , SCREAMING_SNAKE_CASE )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def __magic_name__ ( ) -> str:
return __name__.split('.' )[0]
def __magic_name__ ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def __magic_name__ ( ) -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_lowercase : str = logging.StreamHandler() # Set sys.stderr as stream.
_lowercase : Union[str, Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_lowercase : Optional[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_lowercase : Any = False
def __magic_name__ ( ) -> None:
global _default_handler
with _lock:
if not _default_handler:
return
_lowercase : Union[str, Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_lowercase : Any = None
def __magic_name__ ( ) -> str:
return log_levels
def __magic_name__ ( SCREAMING_SNAKE_CASE = None ) -> logging.Logger:
if name is None:
_lowercase : Optional[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
_configure_library_root_logger()
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> List[str]:
return set_verbosity(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Tuple:
return set_verbosity(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> str:
return set_verbosity(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Optional[int]:
return set_verbosity(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __magic_name__ ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> None:
_configure_library_root_logger()
_lowercase : Optional[int] = False
def __magic_name__ ( ) -> None:
_configure_library_root_logger()
_lowercase : Any = True
def __magic_name__ ( ) -> None:
_lowercase : Tuple = _get_library_root_logger().handlers
for handler in handlers:
_lowercase : Dict = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> None:
_lowercase : Optional[int] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(SCREAMING_SNAKE_CASE )
def __magic_name__ ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : List[str] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , SCREAMING_SNAKE_CASE )
if no_advisory_warnings:
return
self.warning(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
UpperCamelCase = warning_advice
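# lru_cache keyed on the call arguments makes the wrapped warning fire only once per distinct message.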
@functools.lru_cache(SCREAMING_SNAKE_CASE )
def __magic_name__ ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
self.warning(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
UpperCamelCase = warning_once
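# No-op stand-in for tqdm used when progress bars are disabled: it still iterates the wrapped iterable
# but swallows every attribute access and method call.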
class lowerCAmelCase_ :
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ): # pylint: disable=unused-argument
_lowercase : str = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , _lowerCAmelCase ):
def empty_fn(*_lowerCAmelCase , **_lowerCAmelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return
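# Dispatcher that hands out real tqdm bars when progress bars are enabled and the no-op stand-in above otherwise.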
class lowerCAmelCase_ :
def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
if _tqdm_active:
return tqdm_lib.tqdm(*_lowerCAmelCase , **_lowerCAmelCase )
else:
return EmptyTqdm(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
_lowercase : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCamelCase = _tqdm_cls()
def __magic_name__ ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def __magic_name__ ( ) -> Optional[int]:
global _tqdm_active
_lowercase : List[Any] = True
hf_hub_utils.enable_progress_bars()
def __magic_name__ ( ) -> Dict:
global _tqdm_active
_lowercase : List[Any] = False
hf_hub_utils.disable_progress_bars()
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
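# The torch-backed classes are only registered when torch is importable; _LazyModule defers the actual imports
# until one of the symbols is accessed.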
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
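# Chudnovsky series for pi: each iteration contributes roughly 14 correct decimal digits, hence ceil(precision / 14) terms.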
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
_lowercase : List[Any] = precision
_lowercase : Optional[Any] = ceil(precision / 14 )
_lowercase : Dict = 426_880 * Decimal(10_005 ).sqrt()
_lowercase : str = 1
_lowercase : List[Any] = 13_591_409
_lowercase : str = Decimal(SCREAMING_SNAKE_CASE )
for k in range(1 , SCREAMING_SNAKE_CASE ):
_lowercase : str = factorial(6 * k ) // (factorial(3 * k ) * factorial(SCREAMING_SNAKE_CASE ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
UpperCamelCase = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
| 677 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCamelCase = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ElectraTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars
):
_lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) )
_lowercase : Dict = do_lower_case
_lowercase : Optional[Any] = strip_accents
_lowercase : Any = tokenize_chinese_chars
_lowercase : Tuple = normalizer_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = do_lower_case
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
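# Wrap the sequences as [CLS] tokens_a [SEP], appending tokens_b [SEP] when a second sequence is given.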
_lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
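# Token type ids: 0 for the first sequence and its special tokens, 1 for the optional second sequence and its trailing [SEP].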
_lowercase : str = [self.sep_token_id]
_lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 677 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
for attribute in key.split('.' ):
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
_lowercase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowercase : List[str] = value
elif weight_type == "weight_g":
_lowercase : Any = value
elif weight_type == "weight_v":
_lowercase : Tuple = value
elif weight_type == "bias":
_lowercase : List[str] = value
else:
_lowercase : Dict = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
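# Walk the fairseq state dict and copy every tensor into the matching HF parameter, using MAPPING for the renames
# and collecting anything that stays unused.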
_lowercase : Optional[int] = []
_lowercase : Optional[int] = fairseq_model.state_dict()
_lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowercase : Dict = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
_lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
_lowercase : Union[str, Any] = True
if "*" in mapped_key:
_lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowercase : Optional[int] = 'weight_g'
elif "weight_v" in name:
_lowercase : Optional[Any] = 'weight_v'
elif "weight" in name:
_lowercase : str = 'weight'
elif "bias" in name:
_lowercase : Any = 'bias'
else:
_lowercase : str = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
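# Feature-extractor tensors: the layer id and type id parsed from the fairseq name decide whether the value
# belongs to a conv weight/bias or to a layer norm.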
_lowercase : Any = full_name.split('conv_layers.' )[-1]
_lowercase : Any = name.split('.' )
_lowercase : Optional[Any] = int(items[0] )
_lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowercase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowercase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowercase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowercase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
if config_path is not None:
_lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
_lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE )
# important: change the bos & pad token ids, since the CTC blank symbol is <pad>
# and not <s> as in fairseq
_lowercase : Dict = target_dict.pad_index
_lowercase : Dict = target_dict.bos_index
_lowercase : Tuple = target_dict.eos_index
_lowercase : List[Any] = len(target_dict.symbols )
_lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
_lowercase : int = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
_lowercase : str = True if config.feat_extract_norm == 'layer' else False
_lowercase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
_lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
_lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowercase : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
for attribute in key.split('.' ):
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
_lowercase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowercase : List[str] = value
elif weight_type == "weight_g":
_lowercase : Any = value
elif weight_type == "weight_v":
_lowercase : Tuple = value
elif weight_type == "bias":
_lowercase : List[str] = value
else:
_lowercase : Dict = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Optional[int] = []
_lowercase : Optional[int] = fairseq_model.state_dict()
_lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowercase : Dict = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
_lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
_lowercase : Union[str, Any] = True
if "*" in mapped_key:
_lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowercase : Optional[int] = 'weight_g'
elif "weight_v" in name:
_lowercase : Optional[Any] = 'weight_v'
elif "weight" in name:
_lowercase : str = 'weight'
elif "bias" in name:
_lowercase : Any = 'bias'
else:
_lowercase : str = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Any = full_name.split('conv_layers.' )[-1]
_lowercase : Any = name.split('.' )
_lowercase : Optional[Any] = int(items[0] )
_lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowercase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowercase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowercase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowercase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
if config_path is not None:
_lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
_lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE )
# important: change the bos & pad token ids, since the CTC blank symbol is <pad>
# and not <s> as in fairseq
_lowercase : Dict = target_dict.pad_index
_lowercase : Dict = target_dict.bos_index
_lowercase : Tuple = target_dict.eos_index
_lowercase : List[Any] = len(target_dict.symbols )
_lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
_lowercase : int = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
_lowercase : str = True if config.feat_extract_norm == 'layer' else False
_lowercase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
_lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
_lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowercase : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[list]:
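# One elimination round: normalise each row so its leading coefficient becomes 1, subtract it from the first row
# to cancel that column, then recurse on the reduced system.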
_lowercase : Tuple = current_set.copy()
for row_index, row in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : List[Any] = row[0]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE ):
if magnitude == 0:
_lowercase : int = column
continue
_lowercase : int = column / magnitude
# Subtract to cancel term
_lowercase : Optional[int] = current_set[0]
_lowercase : str = [first_row]
_lowercase : Tuple = current_set[1::]
for row in current_set:
_lowercase : Dict = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(SCREAMING_SNAKE_CASE )
continue
for column_index in range(len(SCREAMING_SNAKE_CASE ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(SCREAMING_SNAKE_CASE )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowercase : str = final_set[0]
_lowercase : List[str] = []
_lowercase : Optional[Any] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowercase : str = simplify(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = resultant
return final_set
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
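# Solve n equations in n unknowns given as rows [a_1, ..., a_n, constant]: validate the input, run the
# elimination above, then back-substitute from the last reduced row.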
if len(SCREAMING_SNAKE_CASE ) == 0:
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
_lowercase : List[str] = len(SCREAMING_SNAKE_CASE ) + 1
if any(len(SCREAMING_SNAKE_CASE ) != _length for item in equations ):
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
for row in equations:
if any(not isinstance(SCREAMING_SNAKE_CASE , (int, float) ) for column in row ):
raise ValueError('solve_simultaneous() requires lists of integers' )
if len(SCREAMING_SNAKE_CASE ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowercase : Dict = equations.copy()
if any(0 in row for row in data_set ):
_lowercase : str = data_set.copy()
_lowercase : Optional[Any] = []
for row_index, row in enumerate(SCREAMING_SNAKE_CASE ):
if 0 not in row:
_lowercase : Dict = data_set.pop(SCREAMING_SNAKE_CASE )
break
if not full_row:
raise ValueError('solve_simultaneous() requires at least 1 full equation' )
data_set.insert(0 , SCREAMING_SNAKE_CASE )
_lowercase : str = data_set.copy()
_lowercase : List[Any] = simplify(SCREAMING_SNAKE_CASE )
_lowercase : str = simplified[::-1]
_lowercase : list = []
for row in simplified:
_lowercase : Optional[Any] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowercase : int = row.copy()[: len(SCREAMING_SNAKE_CASE ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(SCREAMING_SNAKE_CASE ) == 0:
solutions.append(0 )
continue
_lowercase : List[str] = temp_row[1::]
_lowercase : int = temp_row[::-1]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE ):
current_solution -= column * solutions[column_index]
solutions.append(SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = []
for item in solutions:
final.append(float(round(SCREAMING_SNAKE_CASE , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 677 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Dict = bbox[i, j, 0]
_lowercase : int = t
_lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase )
_lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = 10
def __a ( self ):
_lowercase : Optional[int] = TFLayoutLMModelTester(self )
_lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __a ( self ):
pass
def __magic_name__ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def __a ( self ):
# initialize model with randomly initialized sequence classification head
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : List[Any] = outputs.loss
_lowercase : Any = (2,)
self.assertEqual(loss.shape , _lowerCAmelCase )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Dict = (2, 2)
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Dict = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized question answering head
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Any = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
_lowercase : Optional[int] = 0
_lowercase : List[Any] = 0
_lowercase : List[str] = knapsack(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
_lowercase : str = values[index] + knapsack(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
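# Minimal usage sketch (illustrative numbers; it assumes the function is exposed under the
# name `knapsack`, as its recursive calls above suggest, with the argument order
# (weights, values, number_of_items, max_weight, index)):
#
#     knapsack([3, 4, 5], [30, 50, 60], 3, 10, 0)  # -> 110, taking the items of weight 4 and 5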
| 677 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[str] = logging.get_logger()
# the current default level is logging.WARNING
_lowercase : Union[str, Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = logging.get_verbosity()
_lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : Tuple = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase )
_lowercase : Optional[Any] = logging.log_levels[env_level_str]
_lowercase : Dict = logging.get_verbosity()
self.assertEqual(
_lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_lowercase : Any = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_lowercase : Tuple = logging.logging.getLogger()
with CaptureLogger(_lowerCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __a ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : List[str] = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def __magic_name__ ( ) -> List[str]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
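# Typical use outside this test-suite (sketch): the same helpers exercised above can silence
# library warnings and progress bars in user code.
#
#     from transformers.utils import logging
#     logging.set_verbosity_error()   # only errors from transformers.* loggers
#     logging.disable_progress_bar()  # hide the huggingface_hub progress bars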
| 677 | 1 |
from __future__ import annotations
from collections import deque
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
_lowercase : list[dict] = []
self.adlist.append(
{'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )
for keyword in keywords:
self.add_keyword(_lowerCAmelCase )
self.set_fail_transitions()
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 0
for character in keyword:
_lowercase : Optional[Any] = self.find_next_state(_lowerCAmelCase , _lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'value': character,
'next_states': [],
'fail_state': 0,
'output': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
_lowercase : Optional[Any] = len(self.adlist ) - 1
else:
_lowercase : Any = next_state
self.adlist[current_state]["output"].append(_lowerCAmelCase )
def __a ( self ):
_lowercase : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(_lowerCAmelCase )
_lowercase : Optional[int] = 0
while q:
_lowercase : Optional[int] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_lowerCAmelCase )
_lowercase : int = self.adlist[r]['fail_state']
while (
self.find_next_state(_lowerCAmelCase , self.adlist[child]['value'] ) is None
and state != 0
):
_lowercase : Tuple = self.adlist[state]['fail_state']
_lowercase : Dict = self.find_next_state(
_lowerCAmelCase , self.adlist[child]['value'] )
if self.adlist[child]["fail_state"] is None:
_lowercase : List[Any] = 0
_lowercase : Union[str, Any] = (
self.adlist[child]['output']
+ self.adlist[self.adlist[child]['fail_state']]['output']
)
def __a ( self , _lowerCAmelCase ):
_lowercase : dict = {} # returns a dict with keywords and list of its occurrences
_lowercase : Any = 0
for i in range(len(_lowerCAmelCase ) ):
while (
self.find_next_state(_lowerCAmelCase , string[i] ) is None
and current_state != 0
):
_lowercase : List[Any] = self.adlist[current_state]['fail_state']
_lowercase : Union[str, Any] = self.find_next_state(_lowerCAmelCase , string[i] )
if next_state is None:
_lowercase : Any = 0
else:
_lowercase : List[Any] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
_lowercase : Tuple = []
result[key].append(i - len(_lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
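# Minimal usage sketch (keywords and text are illustrative; it assumes the methods keep their
# conventional names `add_keyword`, `set_fail_transitions` and `search_in`, which are the
# names the constructor above calls internally):
#
#     automaton = lowerCAmelCase_(["what", "hat", "ver", "er"])
#     automaton.search_in("whatever, err ... , wherever")
#     # -> start positions per keyword, e.g. 'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]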
| 677 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase = "pt"
elif is_tf_available():
UpperCamelCase = "tf"
else:
UpperCamelCase = "jax"
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = PerceiverTokenizer
_UpperCamelCase : str = False
def __a ( self ):
super().setUp()
_lowercase : List[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def __a ( self , **_lowerCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowercase : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
try:
_lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) )
_lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) )
if max_length is not None and len(_lowerCAmelCase ) > max_length:
_lowercase : Any = toks[:max_length]
if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0:
while len(_lowerCAmelCase ) < min_length:
_lowercase : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Optional[Any] = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
if " " not in output_txt and len(_lowerCAmelCase ) > 1:
_lowercase : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase )
)
if with_prefix_space:
_lowercase : List[Any] = ' ' + output_txt
_lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
return output_txt, output_ids
def __a ( self ):
_lowercase : Dict = self.perceiver_tokenizer
_lowercase : Optional[Any] = 'Unicode €.'
_lowercase : str = tokenizer(_lowerCAmelCase )
_lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' )
_lowercase : Union[str, Any] = tokenizer('e è é ê ë' )
_lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : int = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def __a ( self ):
_lowercase : List[str] = self.perceiver_tokenizer
_lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
_lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
if FRAMEWORK != "jax":
_lowercase : int = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def __a ( self ):
_lowercase : List[Any] = self.perceiver_tokenizer
_lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _lowerCAmelCase )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertNotIn('decoder_input_ids' , _lowerCAmelCase )
self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.perceiver_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : Optional[int] = tokenizer(
text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def __a ( self ):
# safety check on max_len default value so we are sure the test works
_lowercase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
_lowercase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[str] = tempfile.mkdtemp()
_lowercase : int = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
_lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : List[str] = json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(_lowerCAmelCase )
_lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )]
_lowercase : str = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[int] = tokenizer_class.from_pretrained(
_lowerCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __a ( self ):
_lowercase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
# The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
# one-character strings and special added tokens as tokens
_lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
| 677 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
_lowercase : int = RemBertConfig.from_json_file(SCREAMING_SNAKE_CASE )
print('Building PyTorch model from configuration: {}'.format(str(SCREAMING_SNAKE_CASE ) ) )
_lowercase : int = RemBertModel(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print('Save PyTorch model to {}'.format(SCREAMING_SNAKE_CASE ) )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
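# Example invocation (sketch; the script filename and paths are illustrative):
#
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/rembert/model.ckpt \
#         --rembert_config_file /path/to/rembert_config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin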
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = "cuda" if torch.cuda.is_available() else "cpu"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=100 , SCREAMING_SNAKE_CASE=" " ) -> List[str]:
_lowercase : Union[str, Any] = text.split(SCREAMING_SNAKE_CASE )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )]
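# For example (illustrative call, assuming the helper keeps the name `split_text` used by the
# caller below): split_text("a b c d e", 2) -> ["a b", "c d", "e"], i.e. passages of at most
# two whitespace-separated words each.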
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict:
titles , texts = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(SCREAMING_SNAKE_CASE ):
titles.append(title if title is not None else '' )
texts.append(SCREAMING_SNAKE_CASE )
return {"title": titles, "text": texts}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> dict:
_lowercase : Tuple = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )['input_ids']
_lowercase : List[Any] = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE ) , return_dict=SCREAMING_SNAKE_CASE ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_lowercase : Any = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_lowercase : str = dataset.map(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , num_proc=processing_args.num_proc )
# And compute the embeddings
_lowercase : Any = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE )
_lowercase : List[str] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_lowercase : List[Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
_lowercase : int = dataset.map(
partial(SCREAMING_SNAKE_CASE , ctx_encoder=SCREAMING_SNAKE_CASE , ctx_tokenizer=SCREAMING_SNAKE_CASE ) , batched=SCREAMING_SNAKE_CASE , batch_size=processing_args.batch_size , features=SCREAMING_SNAKE_CASE , )
# And finally save your dataset
_lowercase : List[str] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(SCREAMING_SNAKE_CASE )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_lowercase : int = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=SCREAMING_SNAKE_CASE )
# And save the index
_lowercase : Any = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(SCREAMING_SNAKE_CASE )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowerCAmelCase_ :
_UpperCamelCase : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
_UpperCamelCase : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
_UpperCamelCase : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
_UpperCamelCase : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
_UpperCamelCase : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class lowerCAmelCase_ :
_UpperCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
_UpperCamelCase : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class lowerCAmelCase_ :
_UpperCamelCase : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
_UpperCamelCase : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 677 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Tuple = "ClapFeatureExtractor"
_UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
_lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowercase : Any = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowercase : Union[str, Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
_lowercase : Dict = self.tokenizer.model_input_names
_lowercase : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
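# Minimal usage sketch (the checkpoint id and inputs are illustrative, not taken from this file):
#
#     from transformers import ClapProcessor
#     import numpy as np
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(
#         text=["a dog barking"],
#         audios=[np.zeros(48_000)],
#         sampling_rate=48_000,
#         return_tensors="pt",
#     )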
| 677 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __magic_name__ ( ) -> List[Any]:
_lowercase : Dict = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=SCREAMING_SNAKE_CASE , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=SCREAMING_SNAKE_CASE , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=SCREAMING_SNAKE_CASE )
return parser.parse_args()
def __magic_name__ ( ) -> str:
_lowercase : List[Any] = parse_args()
# Import training_script as a module.
_lowercase : int = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowercase : int = script_fpath.stem
_lowercase : int = importlib.import_module(SCREAMING_SNAKE_CASE )
# Patch sys.argv
_lowercase : Optional[Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
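# Example invocation (sketch; the launcher filename and training script are illustrative):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# Everything after the training script path is forwarded to that script, with an extra
# `--tpu_num_cores` flag appended as done in main() above.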
| 677 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
_lowercase : Any = num_of_nodes
_lowercase : list[list[int]] = []
_lowercase : dict[int, int] = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
self.m_edges.append([u_node, v_node, weight] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowercase : Optional[int] = self.find_component(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if component_size[u_node] <= component_size[v_node]:
_lowercase : str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
_lowercase : Any = self.find_component(_lowerCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = []
_lowercase : Optional[Any] = 0
_lowercase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowercase : str = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowercase , _lowercase , _lowercase : List[str] = edge
_lowercase : Union[str, Any] = self.m_component[u]
_lowercase : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowercase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase , _lowercase , _lowercase : int = edge
_lowercase : Optional[int] = self.m_component[u]
_lowercase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
_lowercase : str = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __magic_name__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
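# Minimal usage sketch (edges are illustrative; it assumes the graph methods keep their
# conventional names `add_edge` and `boruvka`, matching the internal calls to
# `find_component`, `set_component` and `union` above):
#
#     g = lowerCAmelCase_(3)   # 3 nodes: 0, 1, 2
#     g.add_edge(0, 1, 5)
#     g.add_edge(1, 2, 10)
#     g.add_edge(2, 0, 1)
#     g.boruvka()              # keeps the edges of weight 1 and 5, total weight 6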
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for _ in range(SCREAMING_SNAKE_CASE ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_lowercase , _lowercase : str = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
UpperCamelCase = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
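# A short trace (illustrative): sorting [3, 1, 2] takes the alternating passes
#     pass 0 (even start): compare (0, 1) -> [1, 3, 2]
#     pass 1 (odd start):  compare (1, 2) -> [1, 2, 3]
#     pass 2 (even start): no swap        -> [1, 2, 3]
# i.e. n passes over adjacent pairs, alternating between even and odd starting indices.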
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Tuple = {}
_lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids']
_lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 677 | 1 |
from collections.abc import Sequence
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
_lowercase : Optional[Any] = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = result * x + coeff
return result
if __name__ == "__main__":
UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
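# Worked check for the inputs above: poly = (0.0, 0.0, 5.0, 9.3, 7.0) encodes
# 0 + 0*x + 5*x**2 + 9.3*x**3 + 7*x**4, so at x = 10.0 both functions return
# 500 + 9_300 + 70_000 = 79_800.0; Horner's rule folds the same sum as
# ((((7*x + 9.3)*x + 5)*x + 0)*x + 0).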
| 677 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration}
UpperCamelCase = {"facebook/bart-base": BartTokenizer}
def __magic_name__ ( ) -> str:
_lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' )
_lowercase : Optional[Any] = parser.parse_args()
return args
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]:
_lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
_lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
_lowercase : Dict = 0
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
model.eval()
_lowercase : List[Any] = None
_lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) )
with torch.no_grad():
_lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.'
_lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_lowercase : str = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
SCREAMING_SNAKE_CASE , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=SCREAMING_SNAKE_CASE , )
logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) )
logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = ort_sess.run(
SCREAMING_SNAKE_CASE , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(SCREAMING_SNAKE_CASE ),
'max_length': np.array(SCREAMING_SNAKE_CASE ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def __magic_name__ ( ) -> Any:
_lowercase : Dict = parse_args()
_lowercase : Union[str, Any] = 5
_lowercase : Union[str, Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowercase : Optional[Any] = torch.device(args.device )
_lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(SCREAMING_SNAKE_CASE )
if args.max_length:
_lowercase : Any = args.max_length
if args.num_beams:
_lowercase : List[str] = args.num_beams
if args.output_file_path:
_lowercase : Union[str, Any] = args.output_file_path
else:
_lowercase : Tuple = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
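# Example invocation (sketch; the script filename is illustrative):
#
#     python bart_onnx_export.py --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 --device cpu --output_file_path bart.onnx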
| 677 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if "model" in orig_key:
_lowercase : Optional[int] = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
_lowercase : Optional[Any] = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
_lowercase : Dict = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
_lowercase : Optional[Any] = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
_lowercase : Optional[Any] = orig_key.split('.' )[0].split('_' )[-1]
_lowercase : int = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
_lowercase : Union[str, Any] = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
_lowercase : Any = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
_lowercase : Dict = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
_lowercase : Optional[int] = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
_lowercase : List[str] = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
_lowercase : Optional[int] = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
_lowercase : Any = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
_lowercase : List[str] = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
_lowercase : Optional[Any] = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
_lowercase : List[Any] = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
_lowercase : str = 'yoso.' + orig_key
return orig_key
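# Illustrative trace of the renaming above, for one key of the shape the rules expect:
#     "model.transformer_0.mha.W_q.weight"
#       -> "transformer_0.mha.W_q.weight"                      (strip "model.")
#       -> "encoder.layer.0.mha.W_q.weight"                    (transformer_{n} -> encoder.layer.{n})
#       -> "encoder.layer.0.attention.W_q.weight"              (mha -> attention)
#       -> "encoder.layer.0.attention.self.query.weight"       (W_q -> self.query)
#       -> "yoso.encoder.layer.0.attention.self.query.weight"  (prefixed, since "cls" is absent)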
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
for key in orig_state_dict.copy().keys():
_lowercase : Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowercase : Tuple = val
_lowercase : Union[str, Any] = orig_state_dict['cls.predictions.decoder.bias']
_lowercase : int = torch.arange(SCREAMING_SNAKE_CASE ).expand((1, -1) ) + 2
return orig_state_dict
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : List[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )['model_state_dict']
_lowercase : Any = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = YosoForMaskedLM(SCREAMING_SNAKE_CASE )
_lowercase : str = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE )
print(model.load_state_dict(SCREAMING_SNAKE_CASE ) )
model.eval()
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 677 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : List[str] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : int = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def __a ( self ):
_lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowercase : List[str] = model(_lowerCAmelCase )[0]
_lowercase : str = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : List[Any] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
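# Hedged note on the integration check above: only the [:, :3, :3] slice of the
# prediction logits is compared against the hard-coded reference values, with an
# absolute tolerance of 1e-4 via tf.debugging.assert_near.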
| 677 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCamelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : Optional[int] = ParquetConfig
def __a ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , _lowerCAmelCase ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowercase : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
_lowercase : Dict = data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : Union[str, Any] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowercase : Dict = []
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : Any = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_lowerCAmelCase ):
with open(_lowerCAmelCase , 'rb' ) as f:
_lowercase : str = datasets.Features.from_arrow_schema(pq.read_schema(_lowerCAmelCase ) )
break
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'files': files} ) )
return splits
def __a ( self , _lowerCAmelCase ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowercase : str = table_cast(_lowerCAmelCase , self.info.features.arrow_schema )
return pa_table
def __a ( self , _lowerCAmelCase ):
_lowercase : str = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
with open(_lowerCAmelCase , 'rb' ) as f:
_lowercase : int = pq.ParquetFile(_lowerCAmelCase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowercase : List[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(_lowerCAmelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCAmelCase )}: {e}""" )
raise
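# Hedged usage sketch (not part of the builder above): reading a Parquet file in
# record batches with pyarrow mirrors the pattern the generator uses. The file name
# "data.parquet" and the batch size are assumptions for illustration only.
import pyarrow as pa
import pyarrow.parquet as pq

parquet_file = pq.ParquetFile("data.parquet")
for record_batch in parquet_file.iter_batches(batch_size=10000, columns=None):
    table = pa.Table.from_batches([record_batch])  # one Arrow table per record batch
    print(table.num_rows)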
| 677 |
import qiskit
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> qiskit.result.counts.Counts:
_lowercase : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_lowercase : Optional[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_lowercase : Optional[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 677 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with an OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer needed,
    # but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
_lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
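# Hedged illustration of what the shift_tokens_right assertions above check: for the
# first row [71, 82, 18, 33, 2, 1, 1] with pad_token_id=1 and decoder_start_token_id=2,
# the shifted row starts with 2, keeps the original length, and contains exactly one
# fewer pad token than the input.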
| 677 |
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
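# Hedged usage sketch for the lazily exported tokenizer above. ByT5 operates directly
# on UTF-8 bytes, so constructing it without a vocab file (as below) is assumed to work
# with the library defaults; the resulting ids are raw byte values offset to make room
# for the special tokens, followed by the </s> id.
from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer()
print(tokenizer("hello").input_ids)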
| 677 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = True
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
_lowercase : Optional[int] = super().outputs
if self.task == "default":
_lowercase : List[str] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
_lowercase : int = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
        # For some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly.
_lowercase : str = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_lowercase : Any = 1
return inputs
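# Hedged illustration only (plain PyTorch, not part of the ONNX config above): building
# a global attention mask in which every second token is global, as the dummy-input
# code does. The shapes below are assumptions for illustration.
import torch

input_ids = torch.ones((2, 8), dtype=torch.long)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # mark every second token as global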
| 677 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None ):
super().__init__()
_lowercase : Optional[Any] = pad_token_id
_lowercase : Any = max_length
_lowercase : str = vocab
_lowercase : Optional[Any] = merges
_lowercase : Tuple = BytePairTokenizer(_lowerCAmelCase , _lowerCAmelCase , sequence_length=_lowerCAmelCase )
@classmethod
def __a ( cls , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ):
_lowercase : Optional[int] = [' '.join(_lowerCAmelCase ) for m in tokenizer.bpe_ranks.keys()]
_lowercase : Tuple = tokenizer.get_vocab()
return cls(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def __a ( cls , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ):
_lowercase : Optional[int] = GPTaTokenizer.from_pretrained(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
return cls.from_tokenizer(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def __a ( cls , _lowerCAmelCase ):
return cls(**_lowerCAmelCase )
def __a ( self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : List[str] = self.tf_tokenizer(_lowerCAmelCase )
_lowercase : Any = tf.ones_like(_lowerCAmelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
_lowercase : List[str] = max_length if max_length is not None else self.max_length
if max_length is not None:
_lowercase , _lowercase : Dict = pad_model_inputs(
_lowerCAmelCase , max_seq_length=_lowerCAmelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
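# Hedged sketch of the padding step used above: pad_model_inputs turns a ragged batch
# of token ids into a dense (input_ids, attention_mask) pair. The toy ids and the
# sequence length below are assumptions for illustration only.
import tensorflow as tf
from tensorflow_text import pad_model_inputs

ragged_ids = tf.ragged.constant([[101, 2023, 2003], [101]])
input_ids, attention_mask = pad_model_inputs(ragged_ids, max_seq_length=4, pad_value=0)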
| 677 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
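# Hedged examples for the uniqueness check above (it returns True only when no value
# in the sequence repeats):
#     [1, 2, 3]  -> True
#     [1, 2, 2]  -> False
#     "abc"      -> True (any sequence of hashable items works)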
| 677 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : List[str] = "xlm-prophetnet"
_UpperCamelCase : int = ["past_key_values"]
_UpperCamelCase : List[Any] = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self , _lowerCAmelCase = 0.1 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 1_0_2_4 , _lowerCAmelCase = 4_0_9_6 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_6 , _lowerCAmelCase = 4_0_9_6 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_6 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_2 , _lowerCAmelCase = 1_2_8 , _lowerCAmelCase = False , _lowerCAmelCase = 0.0 , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = 1 , _lowerCAmelCase = 2 , **_lowerCAmelCase , ):
_lowercase : Dict = vocab_size
_lowercase : List[str] = hidden_size
_lowercase : List[str] = encoder_ffn_dim
_lowercase : List[str] = num_encoder_layers
_lowercase : Optional[Any] = num_encoder_attention_heads
_lowercase : List[Any] = decoder_ffn_dim
_lowercase : int = num_decoder_layers
_lowercase : Union[str, Any] = num_decoder_attention_heads
_lowercase : int = max_position_embeddings
_lowercase : Optional[Any] = init_std # Normal(0, this parameter)
_lowercase : Optional[Any] = activation_function
# parameters for xlmprophetnet
_lowercase : Optional[Any] = ngram
_lowercase : Optional[int] = num_buckets
_lowercase : Optional[int] = relative_max_distance
_lowercase : Optional[int] = disable_ngram_loss
_lowercase : int = eps
# 3 Types of Dropout
_lowercase : int = attention_dropout
_lowercase : int = activation_dropout
_lowercase : int = dropout
_lowercase : Optional[int] = use_cache
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , add_cross_attention=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
@property
    def num_hidden_layers( self ):
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers( self , _lowerCAmelCase ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 677 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list:
_lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Dict = i
_lowercase : str = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowercase : Optional[Any] = array[temp_index - 1]
temp_index -= 1
_lowercase : Optional[Any] = temp_index_value
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap
_lowercase : List[str] = index
_lowercase : List[str] = 2 * index + 1 # Left Node
_lowercase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowercase : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowercase : str = right_index
if largest != index:
_lowercase , _lowercase : List[str] = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_lowercase , _lowercase : List[Any] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Optional[Any] = low
_lowercase : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_lowercase , _lowercase : Tuple = array[j], array[i]
i += 1
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
_lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) )
_lowercase : str = 16
return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE )
max_depth -= 1
_lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = p
return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
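# Hedged note on the median-of-three pivot selection above: for the values 7, 2 and 5
# at the first, middle and last positions it returns 5 (the middle value), which the
# intro sort then uses as its partition pivot before recursing.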
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
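# Hedged usage sketch for the lazily exported CLIP classes above. The checkpoint name
# below is an assumption (the standard public CLIP checkpoint); the processor's
# (text=..., images=...) call then prepares paired inputs for the model.
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")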
| 677 |
| 677 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = OpenAIGPTTokenizer
_UpperCamelCase : Optional[Any] = OpenAIGPTTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : List[str] = False
def __a ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_lowercase : Dict = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_lowercase : List[str] = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
_lowercase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(_lowerCAmelCase ) )
def __a ( self , _lowerCAmelCase ):
return "lower newer", "lower newer"
def __a ( self ):
_lowercase : Any = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_lowercase : str = 'lower'
_lowercase : Optional[int] = ['low', 'er</w>']
_lowercase : Any = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = tokens + ['<unk>']
_lowercase : Optional[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self , _lowerCAmelCase=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# Simple input
_lowercase : Any = 'This is a simple input'
_lowercase : Tuple = ['This is a simple input 1', 'This is a simple input 2']
_lowercase : Dict = ('This is a simple input', 'This is a pair')
_lowercase : Any = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' , )
def __a ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowerCAmelCase_ ( __snake_case ):
pass
| 677 |
from collections.abc import Sequence
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
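    # Direct evaluation: sum coefficient * x**power, with coefficients ordered from the constant term upward.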
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
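    # Horner's scheme: fold the coefficients from the highest power down, so each step is a single multiply and add.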
_lowercase : Optional[Any] = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = result * x + coeff
return result
if __name__ == "__main__":
UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 677 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
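    # Read the original YAML training config, flatten nested keys into dot-separated names, and attach them to a Namespace.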
print('Loading config file...' )
def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" , SCREAMING_SNAKE_CASE="." ):
_lowercase : Optional[int] = []
for k, v in d.items():
_lowercase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(SCREAMING_SNAKE_CASE , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , sep=SCREAMING_SNAKE_CASE ).items() )
else:
items.append((new_key, v) )
return dict(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = argparse.Namespace()
with open(SCREAMING_SNAKE_CASE , 'r' ) as yaml_file:
try:
_lowercase : List[str] = yaml.load(SCREAMING_SNAKE_CASE , Loader=yaml.FullLoader )
_lowercase : str = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE )
for k, v in flat_cfg.items():
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(SCREAMING_SNAKE_CASE , str(SCREAMING_SNAKE_CASE ) ) )
return config
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
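    # Derive the MobileViTV2 config (label count, image size, id2label mapping, segmentation settings) from the task name and the original config file.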
_lowercase : List[Any] = MobileViTVaConfig()
_lowercase : Dict = False
# dataset
if task_name.startswith('imagenet1k_' ):
_lowercase : Any = 1_000
if int(task_name.strip().split('_' )[-1] ) == 384:
_lowercase : Any = 384
else:
_lowercase : List[str] = 256
_lowercase : str = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
_lowercase : Optional[Any] = 21_000
if int(task_name.strip().split('_' )[-1] ) == 384:
_lowercase : Dict = 384
else:
_lowercase : Optional[Any] = 256
_lowercase : str = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
_lowercase : int = 151
_lowercase : List[str] = 512
_lowercase : Optional[Any] = 'ade20k-id2label.json'
_lowercase : Any = True
elif task_name.startswith('voc_' ):
_lowercase : Union[str, Any] = 21
_lowercase : List[Any] = 512
_lowercase : List[Any] = 'pascal-voc-id2label.json'
_lowercase : Dict = True
# orig_config
_lowercase : List[str] = load_orig_config_file(SCREAMING_SNAKE_CASE )
assert getattr(SCREAMING_SNAKE_CASE , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
_lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(SCREAMING_SNAKE_CASE , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d are not supported"
_lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
_lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
_lowercase : str = getattr(SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
_lowercase : str = getattr(SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
_lowercase : Tuple = 'huggingface/label-files'
_lowercase : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_lowercase : str = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowercase : Dict = idalabel
_lowercase : Dict = {v: k for k, v in idalabel.items()}
return config
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : List[str] = dct.pop(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = val
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Any:
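    # Build (old_key, new_key) pairs that map the original MobileViTv2 checkpoint names onto the Hugging Face module layout.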
if base_model:
_lowercase : Optional[int] = ''
else:
_lowercase : int = 'mobilevitv2.'
_lowercase : Optional[Any] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_lowercase : str = k[8:]
else:
_lowercase : Union[str, Any] = k
if ".block." in k:
_lowercase : Tuple = k_new.replace('.block.' , '.' )
if ".conv." in k:
_lowercase : Dict = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
_lowercase : List[str] = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
_lowercase : Optional[Any] = k_new.replace('conv_1.' , F"""{model_prefix}conv_stem.""" )
for i in [1, 2]:
if F"""layer_{i}.""" in k:
_lowercase : Tuple = k_new.replace(F"""layer_{i}.""" , F"""{model_prefix}encoder.layer.{i-1}.layer.""" )
if ".exp_1x1." in k:
_lowercase : List[Any] = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
_lowercase : Optional[int] = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F"""layer_{i}.0.""" in k:
_lowercase : Any = k_new.replace(F"""layer_{i}.0.""" , F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
if F"""layer_{i}.1.local_rep.0.""" in k:
_lowercase : Tuple = k_new.replace(F"""layer_{i}.1.local_rep.0.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
if F"""layer_{i}.1.local_rep.1.""" in k:
_lowercase : str = k_new.replace(F"""layer_{i}.1.local_rep.1.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
for i in [3, 4, 5]:
if i == 3:
_lowercase : str = [0, 1]
elif i == 4:
_lowercase : int = [0, 1, 2, 3]
elif i == 5:
_lowercase : Union[str, Any] = [0, 1, 2]
for j in j_in:
if F"""layer_{i}.1.global_rep.{j}.""" in k:
_lowercase : int = k_new.replace(
F"""layer_{i}.1.global_rep.{j}.""" , F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
if F"""layer_{i}.1.global_rep.{j+1}.""" in k:
_lowercase : int = k_new.replace(
F"""layer_{i}.1.global_rep.{j+1}.""" , F"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
if F"""layer_{i}.1.conv_proj.""" in k:
_lowercase : Any = k_new.replace(F"""layer_{i}.1.conv_proj.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
if "pre_norm_attn.0." in k:
_lowercase : Dict = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
_lowercase : Tuple = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
_lowercase : str = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
_lowercase : Optional[Any] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
_lowercase : List[Any] = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
_lowercase : Optional[Any] = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
_lowercase : Optional[Any] = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
_lowercase : Any = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
_lowercase : Union[str, Any] = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : Union[str, Any] = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(SCREAMING_SNAKE_CASE )
for k in keys_to_ignore:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Union[str, Any]:
_lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_lowercase : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
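    # End-to-end conversion: load the original checkpoint, rename its keys, load them into the HF model, run a sanity check on a test image, then save the model and image processor.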
_lowercase : Optional[int] = get_mobilevitva_config(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load original state_dict
_lowercase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
_lowercase : Tuple = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE ).eval()
_lowercase : List[str] = False
else:
_lowercase : Optional[int] = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE ).eval()
_lowercase : Any = False
    # remove unused keys and rename the remaining keys of the original state dict
_lowercase : Optional[Any] = checkpoint
remove_unused_keys(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = create_rename_keys(SCREAMING_SNAKE_CASE , base_model=SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load modified state_dict
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
_lowercase : List[str] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_lowercase : Any = image_processor(images=prepare_img() , return_tensors='pt' )
_lowercase : int = model(**SCREAMING_SNAKE_CASE )
# verify classification model
if task_name.startswith('imagenet' ):
_lowercase : Union[str, Any] = outputs.logits
_lowercase : str = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_lowercase : Any = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 677 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=None ):
_lowercase : int = data
_lowercase : Union[str, Any] = None
def __repr__( self ):
_lowercase : Dict = []
_lowercase : Tuple = self
while temp:
string_rep.append(F"""{temp.data}""" )
_lowercase : Optional[Any] = temp.next
return "->".join(_lowerCAmelCase )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
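    # Build a singly linked list from the given sequence and return its head node.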
if not elements_list:
raise Exception('The Elements List is empty' )
_lowercase : Union[str, Any] = Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
_lowercase : Optional[int] = Node(elements_list[i] )
_lowercase : List[Any] = current.next
return head
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
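    # Recurse to the tail first, then print each node's data while unwinding, so values are printed in reverse order.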
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def __magic_name__ ( ) -> List[str]:
from doctest import testmod
testmod()
_lowercase : int = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(SCREAMING_SNAKE_CASE )
print('Elements in Reverse:' )
print_reverse(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
UpperCamelCase = {
"gpt-neox-20b": 2_048,
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ["input_ids", "attention_mask"]
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase=False , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _lowerCAmelCase ) != add_prefix_space:
_lowercase : Union[str, Any] = getattr(_lowerCAmelCase , pre_tok_state.pop('type' ) )
_lowercase : int = add_prefix_space
_lowercase : List[str] = pre_tok_class(**_lowerCAmelCase )
_lowercase : Optional[Any] = add_prefix_space
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Optional[Any] = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) + [self.eos_token_id] )
if len(_lowerCAmelCase ) > self.model_max_length:
_lowercase : List[str] = input_ids[-self.model_max_length :]
return input_ids
| 677 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
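    # NumPy version: element-wise difference, squared, summed, then square-rooted.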
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
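    # Pure-Python version of the same L2 distance, kept for the timeit comparison below.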
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def __magic_name__ ( ) -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
| 677 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCamelCase = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple:
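    # Translate one parameter name from the original Jukebox checkpoint into the corresponding Transformers name.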
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowercase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowercase : Tuple = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowercase : str = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowercase : str = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowercase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowercase : Optional[Any] = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowercase : Tuple = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowercase : int = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
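    # Rewrite every key of the original state dict to the HF layout; the regexes below recognise encoder, decoder and conditioner blocks.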
_lowercase : Any = {}
import re
_lowercase : str = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowercase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowercase : List[str] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowercase : Union[str, Any] = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowercase : int = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowercase : str = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowercase : Tuple = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowercase : Any = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowercase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = re_encoder_block_conv_in.match(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = regex_match.groups()
_lowercase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowercase : Union[str, Any] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowercase : Tuple = re_encoder_block_conv_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = re_encoder_block_resnet.match(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = regex_match.groups()
_lowercase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowercase : List[Any] = {'1': 1, '3': 2}[groups[-2]]
_lowercase : List[Any] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowercase : Union[str, Any] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowercase : Optional[Any] = prefix + resnet_block
_lowercase : List[Any] = re_encoder_block_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : Dict = re_encoder_block_proj_out.match(SCREAMING_SNAKE_CASE )
_lowercase : int = regex_match.groups()
_lowercase : int = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowercase : List[Any] = re_encoder_block_proj_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : str = re_decoder_block_conv_out.match(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = regex_match.groups()
_lowercase : str = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowercase : Optional[int] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowercase : Any = re_decoder_block_conv_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : List[str] = re_decoder_block_resnet.match(SCREAMING_SNAKE_CASE )
_lowercase : Dict = regex_match.groups()
_lowercase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowercase : str = {'1': 1, '3': 2}[groups[-2]]
_lowercase : Optional[int] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowercase : Any = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowercase : Optional[int] = prefix + resnet_block
_lowercase : str = re_decoder_block_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : Union[str, Any] = re_decoder_block_proj_in.match(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = regex_match.groups()
_lowercase : List[str] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowercase : str = re_decoder_block_proj_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = re_prior_cond_conv_out.match(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = regex_match.groups()
_lowercase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowercase : List[str] = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowercase : Dict = re_prior_cond_conv_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = re_prior_cond_resnet.match(SCREAMING_SNAKE_CASE )
_lowercase : str = regex_match.groups()
_lowercase : Optional[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowercase : int = {'1': 1, '3': 2}[groups[-2]]
_lowercase : List[str] = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowercase : List[str] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowercase : Optional[int] = prefix + resnet_block
_lowercase : List[str] = re_prior_cond_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(SCREAMING_SNAKE_CASE ):
_lowercase : Dict = re_prior_cond_proj_in.match(SCREAMING_SNAKE_CASE )
_lowercase : Dict = regex_match.groups()
_lowercase : Tuple = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowercase : Optional[int] = re_prior_cond_proj_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# keep original key
else:
_lowercase : Union[str, Any] = original_key
_lowercase : int = replace_key(SCREAMING_SNAKE_CASE )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
_lowercase : int = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowercase : int = original_key
_lowercase : int = original_key
_lowercase : Optional[int] = value
return new_dict
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> int:
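    # Download any missing original checkpoints, convert the VQ-VAE and each prior state dict, load them into the HF JukeboxModel, and save it.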
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowercase : Optional[int] = requests.get(F"""{PREFIX}{file}""" , allow_redirects=SCREAMING_SNAKE_CASE )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=SCREAMING_SNAKE_CASE )
open(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowercase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowercase : Dict = JukeboxConfig.from_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : str = JukeboxModel(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = []
_lowercase : Optional[Any] = {}
for i, dict_name in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : str = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowercase : int = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowercase : Any = old_dic[k]
elif k.endswith('.w' ):
_lowercase : Optional[int] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowercase : Union[str, Any] = old_dic[k]
else:
_lowercase : int = old_dic[k]
_lowercase : List[str] = 'vqvae' if i == 0 else F"""priors.{3 - i}"""
_lowercase : List[Any] = fix_jukebox_keys(SCREAMING_SNAKE_CASE , model.state_dict() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
weight_dict.append(SCREAMING_SNAKE_CASE )
_lowercase : int = weight_dict.pop(0 )
model.vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCamelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1_000 ) -> int:
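    # For each divisor up to `digit`, follow the successive remainders of numerator/divisor; return the divisor with the longest run of distinct remainders before one repeats.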
_lowercase : List[str] = 1
_lowercase : str = 0
for divide_by_number in range(SCREAMING_SNAKE_CASE , digit + 1 ):
_lowercase : list[int] = []
_lowercase : Dict = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(SCREAMING_SNAKE_CASE ):
_lowercase : int = len(SCREAMING_SNAKE_CASE )
_lowercase : int = divide_by_number
else:
has_been_divided.append(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCamelCase = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ElectraTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars
):
_lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) )
_lowercase : Dict = do_lower_case
_lowercase : Optional[Any] = strip_accents
_lowercase : Any = tokenize_chinese_chars
_lowercase : Tuple = normalizer_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = do_lower_case
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : str = [self.sep_token_id]
_lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 677 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : Optional[Any] = inspect.getfile(accelerate.test_utils )
_lowercase : Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_lowercase : Tuple = test_metrics
@require_cpu
def __a ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __a ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __a ( self ):
self.test_metrics.main()
@require_multi_gpu
def __a ( self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowercase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() )
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]:
if isinstance(SCREAMING_SNAKE_CASE , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class lowerCAmelCase_ :
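    # Shared checks reused by the ViT+BERT and CLIP+BERT dual-encoder test classes below: config construction, save/load round-trips, attention shapes, and PT<->Flax equivalence.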
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(_lowerCAmelCase )
_lowercase : Union[str, Any] = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase , _lowercase : Optional[Any] = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
_lowercase : Union[str, Any] = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase , _lowercase : List[str] = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
_lowercase : Optional[int] = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
_lowercase : List[str] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
_lowercase : str = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
_lowercase : Dict = after_output[0]
_lowercase : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase , _lowercase : Optional[int] = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
_lowercase : Optional[Any] = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
_lowercase : Dict = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : List[str] = to_atuple(vision_model.config.image_size )
_lowercase : Any = to_atuple(vision_model.config.patch_size )
_lowercase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase : str = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowercase : int = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
pt_model.to(_lowerCAmelCase )
pt_model.eval()
# prepare inputs
_lowercase : Optional[int] = inputs_dict
_lowercase : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowercase : str = pt_model(**_lowerCAmelCase ).to_tuple()
_lowercase : int = fx_model(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_lowerCAmelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowerCAmelCase )
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase , from_pt=_lowerCAmelCase )
_lowercase : List[str] = fx_model_loaded(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_lowerCAmelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowerCAmelCase )
_lowercase : Dict = VisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase , from_flax=_lowerCAmelCase )
pt_model_loaded.to(_lowerCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
_lowercase : Optional[int] = pt_model_loaded(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_lowerCAmelCase , pt_output_loaded.numpy() , 4E-2 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = VisionTextDualEncoderModel(_lowerCAmelCase )
_lowercase : int = FlaxVisionTextDualEncoderModel(_lowerCAmelCase )
_lowercase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowerCAmelCase )
_lowercase : Any = fx_state
self.check_pt_flax_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = VisionTextDualEncoderModel(_lowerCAmelCase )
_lowercase : str = FlaxVisionTextDualEncoderModel(_lowerCAmelCase )
_lowercase : str = load_flax_weights_in_pytorch_model(_lowerCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@is_pt_flax_cross_test
def __a ( self ):
_lowercase : Optional[int] = self.prepare_config_and_inputs()
_lowercase : Optional[Any] = config_inputs_dict.pop('vision_config' )
_lowercase : List[Any] = config_inputs_dict.pop('text_config' )
_lowercase : Any = config_inputs_dict
self.check_equivalence_pt_to_flax(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.check_equivalence_flax_to_pt(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@slow
def __a ( self ):
_lowercase , _lowercase : Dict = self.get_pretrained_model_and_inputs()
_lowercase : str = model_a(**_lowerCAmelCase )
_lowercase : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
_lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
_lowercase : List[Any] = model_a(**_lowerCAmelCase )
_lowercase : List[Any] = after_outputs[0]
_lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1E-5 )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
def __a ( self ):
_lowercase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_lowerCAmelCase , text_from_pt=_lowerCAmelCase , )
_lowercase : Optional[Any] = 1_3
_lowercase : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowercase : Optional[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowercase : Dict = random_attention_mask([batch_size, 4] )
_lowercase : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = FlaxViTModel(_lowerCAmelCase )
_lowercase : Optional[int] = FlaxBertModel(_lowerCAmelCase )
return vision_model, text_model
def __a ( self ):
_lowercase : Dict = FlaxViTModelTester(self )
_lowercase : int = FlaxBertModelTester(self )
_lowercase : Optional[int] = vit_model_tester.prepare_config_and_inputs()
_lowercase : Tuple = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : List[str] = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
def __a ( self ):
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_lowerCAmelCase , text_from_pt=_lowerCAmelCase , )
_lowercase : Optional[Any] = 1_3
_lowercase : int = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowercase : Any = random_attention_mask([batch_size, 4] )
_lowercase : Optional[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = FlaxCLIPVisionModel(_lowerCAmelCase )
_lowercase : Dict = FlaxBertModel(_lowerCAmelCase )
return vision_model, text_model
def __a ( self ):
_lowercase : int = FlaxCLIPVisionModelTester(self )
_lowercase : Any = FlaxBertModelTester(self )
_lowercase : List[Any] = clip_model_tester.prepare_config_and_inputs()
_lowercase : List[Any] = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : Dict = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
_lowercase : Any = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
_lowercase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowercase : int = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' )
_lowercase : int = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowercase : int = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 ) )
| 677 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
for attribute in key.split('.' ):
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
_lowercase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowercase : List[str] = value
elif weight_type == "weight_g":
_lowercase : Any = value
elif weight_type == "weight_v":
_lowercase : Tuple = value
elif weight_type == "bias":
_lowercase : List[str] = value
else:
_lowercase : Dict = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Optional[int] = []
_lowercase : Optional[int] = fairseq_model.state_dict()
_lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowercase : Dict = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
_lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
_lowercase : Union[str, Any] = True
if "*" in mapped_key:
_lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
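                    # "weight_g"/"weight_v" are fairseq's weight-normalization parameters (magnitude and direction components)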
if "weight_g" in name:
_lowercase : Optional[int] = 'weight_g'
elif "weight_v" in name:
_lowercase : Optional[Any] = 'weight_v'
elif "weight" in name:
_lowercase : str = 'weight'
elif "bias" in name:
_lowercase : Any = 'bias'
else:
_lowercase : str = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Any = full_name.split('conv_layers.' )[-1]
_lowercase : Any = name.split('.' )
_lowercase : Optional[Any] = int(items[0] )
_lowercase : List[str] = int(items[1] )
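    # items[0] is the conv-block index (layer_id); type_id 0 targets the conv module itself, type_id 2 its (group/layer) norm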
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowercase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowercase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowercase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowercase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
if config_path is not None:
_lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
_lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowercase : Dict = target_dict.pad_index
_lowercase : Dict = target_dict.bos_index
_lowercase : Tuple = target_dict.eos_index
_lowercase : List[Any] = len(target_dict.symbols )
_lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
_lowercase : int = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
_lowercase : str = True if config.feat_extract_norm == 'layer' else False
_lowercase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
_lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
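        # the `data` override points fairseq at the directory containing the task dictionary used by the fine-tuned checkpoint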
_lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowercase : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
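    # Example invocation (illustrative only; the script name, checkpoint, dictionary and output paths are hypothetical):
    # python convert_hubert_checkpoint.py \
    #     --checkpoint_path /path/to/hubert_finetuned.pt \
    #     --dict_path /path/to/dict.ltr.txt \
    #     --pytorch_dump_folder_path ./hubert-converted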
| 677 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
_lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('google/mt5-small' )
_lowercase : str = tokenizer('Hello there' , return_tensors='tf' ).input_ids
_lowercase : str = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
_lowercase : Union[str, Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase ).loss
_lowercase : Union[str, Any] = -tf.math.reduce_mean(_lowerCAmelCase ).numpy()
_lowercase : Dict = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 677 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Dict = bbox[i, j, 0]
_lowercase : int = t
_lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase )
_lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = 10
def __a ( self ):
_lowercase : Optional[int] = TFLayoutLMModelTester(self )
_lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __a ( self ):
pass
def __magic_name__ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
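# Illustrative helper (an assumption added here, not part of the original test module): LayoutLM expects each
# bounding box as [x0, y0, x1, y1] normalized to a 0-1000 scale, which is the scale the hard-coded boxes above
# already use. Raw pixel coordinates could be converted with a minimal sketch like this:
def normalize_bbox(bbox, width, height):
    # scale pixel coordinates to the 0-1000 range relative to the page size
    return [
        int(1_000 * bbox[0] / width),
        int(1_000 * bbox[1] / height),
        int(1_000 * bbox[2] / width),
        int(1_000 * bbox[3] / height),
    ]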
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def __a ( self ):
# initialize model with randomly initialized sequence classification head
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : List[Any] = outputs.loss
_lowercase : Any = (2,)
self.assertEqual(loss.shape , _lowerCAmelCase )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Dict = (2, 2)
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Dict = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
        # initialize model with randomly initialized question answering head
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Any = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
| 677 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
_UpperCamelCase : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase : bool = field(
default=__snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_UpperCamelCase : bool = field(
default=__snake_case , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class lowerCAmelCase_ :
_UpperCamelCase : str = field(
default=__snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase : str = field(
default=__snake_case , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
_UpperCamelCase : Optional[str] = field(
default=__snake_case , metadata={"help": "Train language if it is different from the evaluation language."} )
_UpperCamelCase : Optional[str] = field(
default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase : Optional[str] = field(
default=__snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase : Optional[str] = field(
default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCamelCase : Optional[bool] = field(
default=__snake_case , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
_UpperCamelCase : bool = field(
default=__snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCamelCase : bool = field(
default=__snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
_UpperCamelCase : bool = field(
default=__snake_case , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __magic_name__ ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowercase , _lowercase , _lowercase : Any = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowercase : int = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowercase : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_lowercase : Dict = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_lowercase : List[str] = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase : Optional[Any] = train_dataset.features['label'].names
if training_args.do_eval:
_lowercase : List[str] = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase : Any = eval_dataset.features['label'].names
if training_args.do_predict:
_lowercase : Union[str, Any] = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase : List[str] = predict_dataset.features['label'].names
# Labels
_lowercase : Any = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , idalabel={str(SCREAMING_SNAKE_CASE ): label for i, label in enumerate(SCREAMING_SNAKE_CASE )} , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : Any = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : Tuple = False
def preprocess_function(SCREAMING_SNAKE_CASE ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=SCREAMING_SNAKE_CASE , max_length=data_args.max_seq_length , truncation=SCREAMING_SNAKE_CASE , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_lowercase : Tuple = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
_lowercase : Optional[int] = train_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_lowercase : Optional[Any] = train_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_lowercase : str = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
_lowercase : Union[str, Any] = eval_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_lowercase : Optional[Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_lowercase : Optional[int] = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_predict_samples )
_lowercase : str = predict_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
_lowercase : Any = predict_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
_lowercase : Dict = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = p.predictions[0] if isinstance(p.predictions , SCREAMING_SNAKE_CASE ) else p.predictions
_lowercase : List[str] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Union[str, Any] = default_data_collator
elif training_args.fpaa:
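        # padding to a multiple of 8 keeps fp16 tensor-core kernels efficient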
_lowercase : List[Any] = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 )
else:
_lowercase : List[str] = None
# Initialize our Trainer
_lowercase : List[Any] = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
_lowercase : Tuple = None
if training_args.resume_from_checkpoint is not None:
_lowercase : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : str = last_checkpoint
_lowercase : Optional[int] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE )
)
_lowercase : Optional[int] = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('train' , SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Dict = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
_lowercase , _lowercase , _lowercase : Optional[int] = trainer.predict(SCREAMING_SNAKE_CASE , metric_key_prefix='predict' )
_lowercase : Optional[int] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE )
)
_lowercase : Tuple = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('predict' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('predict' , SCREAMING_SNAKE_CASE )
_lowercase : Tuple = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
_lowercase : List[str] = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : Union[str, Any] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
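    # Example run (illustrative; `--do_train`, `--do_eval`, `--output_dir` and the batch-size flag come from the
    # standard TrainingArguments, while the XNLI-specific flags are defined in the dataclasses above):
    # python run_xnli.py \
    #     --model_name_or_path bert-base-multilingual-cased \
    #     --language de --train_language en \
    #     --do_train --do_eval \
    #     --per_device_train_batch_size 32 \
    #     --output_dir /tmp/debug_xnli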
| 677 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[str] = logging.get_logger()
# the current default level is logging.WARNING
_lowercase : Union[str, Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = logging.get_verbosity()
_lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : Tuple = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase )
_lowercase : Optional[Any] = logging.log_levels[env_level_str]
_lowercase : Dict = logging.get_verbosity()
self.assertEqual(
_lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_lowercase : Any = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_lowercase : Tuple = logging.logging.getLogger()
with CaptureLogger(_lowerCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __a ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : List[str] = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def __magic_name__ ( ) -> List[str]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 677 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = GPTaTokenizer
_UpperCamelCase : List[Any] = GPTaTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : int = {"add_prefix_space": True}
_UpperCamelCase : Union[str, Any] = False
def __a ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_lowercase : Tuple = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_lowercase : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowercase : Any = {'unk_token': '<unk>'}
_lowercase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCAmelCase ) )
def __a ( self , **_lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , **_lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : int = 'lower newer'
_lowercase : Dict = 'lower newer'
return input_text, output_text
def __a ( self ):
_lowercase : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowercase : Any = 'lower newer'
_lowercase : Optional[int] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowercase : Tuple = tokenizer.tokenize(_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = tokens + [tokenizer.unk_token]
_lowercase : List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
if not self.test_rust_tokenizer:
return
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer(add_prefix_space=_lowerCAmelCase )
_lowercase : List[str] = 'lower newer'
# Testing tokenization
_lowercase : List[str] = tokenizer.tokenize(_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
_lowercase : Union[str, Any] = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Testing conversion to ids without special tokens
_lowercase : List[str] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
_lowercase : Optional[Any] = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Testing conversion to ids with special tokens
_lowercase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_lowerCAmelCase )
_lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
_lowercase : str = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Testing the unknown token
_lowercase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowercase : Any = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __a ( self , _lowerCAmelCase=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# Simple input
_lowercase : Any = 'This is a simple input'
_lowercase : Optional[int] = ['This is a simple input 1', 'This is a simple input 2']
_lowercase : Optional[int] = ('This is a simple input', 'This is a pair')
_lowercase : List[str] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' , )
def __a ( self ):
_lowercase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_lowercase : List[str] = 'This is a simple input'
_lowercase : Union[str, Any] = ['This is a simple input looooooooong', 'This is a simple input']
_lowercase : List[Any] = ('This is a simple input', 'This is a pair')
_lowercase : Tuple = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_lowercase : List[Any] = tokenizer.pad_token_id
_lowercase : Optional[int] = tokenizer(_lowerCAmelCase , padding='max_length' , max_length=3_0 , return_tensors='np' )
_lowercase : Union[str, Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncate=_lowerCAmelCase , return_tensors='np' )
_lowercase : Tuple = tokenizer(*_lowerCAmelCase , padding='max_length' , max_length=6_0 , return_tensors='np' )
_lowercase : Any = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncate=_lowerCAmelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __a ( self ):
_lowercase : Tuple = '$$$'
_lowercase : str = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_lowerCAmelCase , add_bos_token=_lowerCAmelCase )
_lowercase : int = 'This is a simple input'
_lowercase : Tuple = ['This is a simple input 1', 'This is a simple input 2']
_lowercase : List[Any] = tokenizer.bos_token_id
_lowercase : Union[str, Any] = tokenizer(_lowerCAmelCase )
_lowercase : int = tokenizer(_lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , _lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowercase : Optional[int] = tokenizer.decode(out_s.input_ids )
_lowercase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __a ( self ):
pass
def __a ( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
_lowercase : List[str] = [self.get_tokenizer(do_lower_case=_lowerCAmelCase , add_bos_token=_lowerCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowercase : Tuple = 'Encode this.'
_lowercase : Any = 'This one too please.'
_lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
encoded_sequence += tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
_lowercase : str = tokenizer.encode_plus(
_lowerCAmelCase , _lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , )
_lowercase : int = encoded_sequence_dict['input_ids']
_lowercase : Optional[int] = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
_lowercase : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_lowerCAmelCase )
]
_lowercase : int = [x for x in filtered_sequence if x is not None]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_lowercase : Tuple = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_lowerCAmelCase )
_lowercase : Any = 'A photo of a cat'
_lowercase : List[str] = tokenizer.encode(
_lowerCAmelCase , )
self.assertEqual(_lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('test_opt' )
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained('./test_opt' )
_lowercase : Tuple = tokenizer.encode(
_lowerCAmelCase , )
self.assertEqual(_lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def __a ( self ):
_lowercase : str = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=_lowerCAmelCase )
_lowercase : str = 'A photo of a cat'
_lowercase : Union[str, Any] = tokenizer.encode(
_lowerCAmelCase , )
# Same as above
self.assertEqual(_lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def __a ( self ):
_lowercase : int = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_lowerCAmelCase )
_lowercase : Dict = 'bos'
_lowercase : List[Any] = tokenizer.get_vocab()['bos']
_lowercase : str = 'A photo of a cat'
_lowercase : Tuple = tokenizer.encode(
_lowerCAmelCase , )
# We changed the bos token
self.assertEqual(_lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('./tok' )
_lowercase : Optional[int] = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
_lowercase : Optional[Any] = tokenizer.encode(
_lowerCAmelCase , )
self.assertEqual(_lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 677 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase = "pt"
elif is_tf_available():
UpperCamelCase = "tf"
else:
UpperCamelCase = "jax"
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = PerceiverTokenizer
_UpperCamelCase : str = False
def __a ( self ):
super().setUp()
_lowercase : List[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def __a ( self , **_lowerCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowercase : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
try:
_lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) )
_lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) )
if max_length is not None and len(_lowerCAmelCase ) > max_length:
_lowercase : Any = toks[:max_length]
if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0:
while len(_lowerCAmelCase ) < min_length:
_lowercase : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Optional[Any] = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
if " " not in output_txt and len(_lowerCAmelCase ) > 1:
_lowercase : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase )
)
if with_prefix_space:
_lowercase : List[Any] = ' ' + output_txt
_lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
return output_txt, output_ids
def __a ( self ):
_lowercase : Dict = self.perceiver_tokenizer
_lowercase : Optional[Any] = 'Unicode €.'
_lowercase : str = tokenizer(_lowerCAmelCase )
_lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' )
_lowercase : Union[str, Any] = tokenizer('e è é ê ë' )
_lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : int = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def __a ( self ):
_lowercase : List[str] = self.perceiver_tokenizer
_lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
_lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
if FRAMEWORK != "jax":
_lowercase : int = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def __a ( self ):
_lowercase : List[Any] = self.perceiver_tokenizer
_lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _lowerCAmelCase )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertNotIn('decoder_input_ids' , _lowerCAmelCase )
self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.perceiver_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : Optional[int] = tokenizer(
text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def __a ( self ):
# safety check on max_len default value so we are sure the test works
_lowercase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
_lowercase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[str] = tempfile.mkdtemp()
_lowercase : int = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
_lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : List[str] = json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(_lowerCAmelCase )
_lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )]
_lowercase : str = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[int] = tokenizer_class.from_pretrained(
_lowerCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __a ( self ):
_lowercase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
_lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
| 677 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
UpperCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
UpperCamelCase = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, "rb") as fp:
UpperCamelCase = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
UpperCamelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
UpperCamelCase = [0] * args.vocab_size
for k, v in counter.items():
UpperCamelCase = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 677 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Tuple = "ClapFeatureExtractor"
_UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
_lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowercase : Any = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowercase : Union[str, Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
_lowercase : Dict = self.tokenizer.model_input_names
_lowercase : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
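# Hypothetical usage sketch (editorial, not part of the original file): the class above
# corresponds to ClapProcessor in transformers; the checkpoint name and tensor below are
# only illustrative.
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt")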
| 677 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , ):
_lowercase : Tuple = parent
_lowercase : Dict = 1_3
_lowercase : Tuple = 7
_lowercase : List[Any] = 3_0
_lowercase : Union[str, Any] = self.seq_length + self.mem_len
_lowercase : Dict = 1_5
_lowercase : int = True
_lowercase : Union[str, Any] = True
_lowercase : str = 9_9
_lowercase : Union[str, Any] = [1_0, 5_0, 8_0]
_lowercase : Union[str, Any] = 3_2
_lowercase : str = 3_2
_lowercase : List[str] = 4
_lowercase : List[str] = 8
_lowercase : Dict = 1_2_8
_lowercase : str = 2
_lowercase : Dict = 2
_lowercase : Dict = None
_lowercase : Tuple = 1
_lowercase : str = 0
_lowercase : int = 3
_lowercase : int = self.vocab_size - 1
_lowercase : str = 0.01
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : str = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __a ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFTransfoXLModel(_lowerCAmelCase )
_lowercase , _lowercase : List[Any] = model(_lowerCAmelCase ).to_tuple()
_lowercase : Dict = {'input_ids': input_ids_a, 'mems': mems_a}
_lowercase , _lowercase : Dict = model(_lowerCAmelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFTransfoXLLMHeadModel(_lowerCAmelCase )
_lowercase , _lowercase : Optional[Any] = model(_lowerCAmelCase ).to_tuple()
_lowercase : Any = {'input_ids': input_ids_a, 'labels': lm_labels}
_lowercase , _lowercase : List[str] = model(_lowerCAmelCase ).to_tuple()
_lowercase , _lowercase : Optional[int] = model([input_ids_a, mems_a] ).to_tuple()
_lowercase : str = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
_lowercase , _lowercase : Tuple = model(_lowerCAmelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = TFTransfoXLForSequenceClassification(_lowerCAmelCase )
_lowercase : Dict = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self ):
_lowercase : Any = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : Any = config_and_inputs
_lowercase : str = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Union[str, Any] = () if is_tf_available() else ()
_UpperCamelCase : Optional[Any] = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : str = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __a ( self ):
_lowercase : Tuple = TFTransfoXLModelTester(self )
_lowercase : Optional[int] = ConfigTester(self , config_class=_lowerCAmelCase , d_embed=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
self.model_tester.set_seed()
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_lowerCAmelCase )
def __a ( self ):
self.model_tester.set_seed()
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Union[str, Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(_lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowercase : List[Any] = model.get_output_embeddings()
assert isinstance(_lowerCAmelCase , tf.keras.layers.Layer )
_lowercase : Dict = model.get_bias()
assert name is None
else:
_lowercase : List[Any] = model.get_output_embeddings()
assert x is None
_lowercase : List[Any] = model.get_bias()
assert name is None
def __a ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __a ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = TFTransfoXLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def __a ( self ):
pass
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def __a ( self ):
_lowercase : List[str] = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
_lowercase : Dict = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowercase : Optional[int] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowercase : str = model.generate(_lowerCAmelCase , max_length=2_0_0 , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCAmelCase )
| 677 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
_lowercase : Any = num_of_nodes
_lowercase : list[list[int]] = []
_lowercase : dict[int, int] = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
self.m_edges.append([u_node, v_node, weight] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowercase : Optional[int] = self.find_component(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if component_size[u_node] <= component_size[v_node]:
_lowercase : str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
_lowercase : Any = self.find_component(_lowerCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCAmelCase )
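    # Editorial note: the method below implements Boruvka's minimum spanning tree algorithm.
    # Every node starts in its own component; in each pass the cheapest edge leaving each
    # component is recorded and used to merge components, and the loop stops once a single
    # component (the spanning tree) remains.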
def __a ( self ):
_lowercase : Any = []
_lowercase : Optional[Any] = 0
_lowercase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowercase : str = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowercase , _lowercase , _lowercase : List[str] = edge
_lowercase : Union[str, Any] = self.m_component[u]
_lowercase : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowercase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase , _lowercase , _lowercase : int = edge
_lowercase : Optional[int] = self.m_component[u]
_lowercase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
_lowercase : str = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __magic_name__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCamelCase = "Create a default config file for Accelerate with only a few flags set."
def __magic_name__ ( SCREAMING_SNAKE_CASE="no" , SCREAMING_SNAKE_CASE = default_json_config_file , SCREAMING_SNAKE_CASE = False ) -> str:
_lowercase : str = Path(SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_lowercase : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_lowercase : List[str] = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
_lowercase : Any = torch.cuda.device_count()
_lowercase : Union[str, Any] = num_gpus
_lowercase : Tuple = False
if num_gpus > 1:
_lowercase : str = 'MULTI_GPU'
else:
_lowercase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
_lowercase : int = torch.xpu.device_count()
_lowercase : int = num_xpus
_lowercase : Optional[Any] = False
if num_xpus > 1:
_lowercase : Union[str, Any] = 'MULTI_XPU'
else:
_lowercase : Optional[int] = 'NO'
elif is_npu_available():
_lowercase : Optional[Any] = torch.npu.device_count()
_lowercase : Optional[Any] = num_npus
_lowercase : Union[str, Any] = False
if num_npus > 1:
_lowercase : Union[str, Any] = 'MULTI_NPU'
else:
_lowercase : int = 'NO'
else:
_lowercase : List[Any] = 0
_lowercase : Any = True
_lowercase : Union[str, Any] = 1
_lowercase : List[Any] = 'NO'
_lowercase : Tuple = ClusterConfig(**SCREAMING_SNAKE_CASE )
config.to_json_file(SCREAMING_SNAKE_CASE )
return path
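# Minimal usage sketch (editorial; the helper above appears to correspond to
# `write_basic_config` in the accelerate source, since all module-level functions in this
# file share the same restyled name):
# write_basic_config(mixed_precision="bf16")  # writes a single-process LOCAL_MACHINE config JSON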
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Union[str, Any] = parser.add_parser('default' , parents=SCREAMING_SNAKE_CASE , help=SCREAMING_SNAKE_CASE , formatter_class=SCREAMING_SNAKE_CASE )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=SCREAMING_SNAKE_CASE , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Tuple = {}
_lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids']
_lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] )
return output
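# Editorial note: the ratio computed above is characters of source per token. For example,
# a snippet of 120 characters that tokenizes into 40 ids gives 120 / 40 = 3.0.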
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 677 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[int] = "autoformer"
_UpperCamelCase : List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "student_t" , _lowerCAmelCase = "nll" , _lowerCAmelCase = 1 , _lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7] , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 6_4 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_2 , _lowerCAmelCase = 3_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 1_0_0 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = True , _lowerCAmelCase=True , _lowerCAmelCase = 1_0 , _lowerCAmelCase = 2_5 , _lowerCAmelCase = 3 , **_lowerCAmelCase , ):
# time series specific configuration
_lowercase : str = prediction_length
_lowercase : List[Any] = context_length if context_length is not None else prediction_length
_lowercase : Union[str, Any] = distribution_output
_lowercase : Optional[Any] = loss
_lowercase : Optional[int] = input_size
_lowercase : List[Any] = num_time_features
_lowercase : Optional[int] = lags_sequence
_lowercase : Tuple = scaling
_lowercase : Any = num_dynamic_real_features
_lowercase : Dict = num_static_real_features
_lowercase : List[str] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_lowercase : Any = cardinality
else:
_lowercase : Optional[int] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_lowercase : Union[str, Any] = embedding_dimension
else:
_lowercase : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_lowercase : str = num_parallel_samples
# Transformer architecture configuration
_lowercase : Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
_lowercase : str = d_model
_lowercase : Optional[int] = encoder_attention_heads
_lowercase : List[str] = decoder_attention_heads
_lowercase : List[Any] = encoder_ffn_dim
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : str = encoder_layers
_lowercase : Union[str, Any] = decoder_layers
_lowercase : List[str] = dropout
_lowercase : str = attention_dropout
_lowercase : Dict = activation_dropout
_lowercase : int = encoder_layerdrop
_lowercase : Tuple = decoder_layerdrop
_lowercase : Union[str, Any] = activation_function
_lowercase : Tuple = init_std
_lowercase : Dict = use_cache
# Autoformer
_lowercase : Tuple = label_length
_lowercase : str = moving_average
_lowercase : List[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
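    # Worked example (editorial, values made up): with embedding_dimension=[2], one dynamic
    # real feature, two time features, no static real features and input_size=1, the property
    # above returns 2 + 1 + 2 + 0 + 1 * 2 = 7 extra features concatenated to the lagged inputs.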
| 677 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration}
UpperCamelCase = {"facebook/bart-base": BartTokenizer}
def __magic_name__ ( ) -> str:
_lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' )
_lowercase : Optional[Any] = parser.parse_args()
return args
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]:
_lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
_lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
_lowercase : Dict = 0
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
model.eval()
_lowercase : List[Any] = None
_lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) )
with torch.no_grad():
_lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.'
_lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_lowercase : str = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
SCREAMING_SNAKE_CASE , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=SCREAMING_SNAKE_CASE , )
logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) )
logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = ort_sess.run(
SCREAMING_SNAKE_CASE , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(SCREAMING_SNAKE_CASE ),
'max_length': np.array(SCREAMING_SNAKE_CASE ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
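# Editorial note: the dynamic_axes mapping passed to torch.onnx.export above marks the batch and
# sequence dimensions as variable, so the exported beam-search graph accepts arbitrary batch sizes
# and lengths; the ONNX Runtime check then only asserts numerical closeness (rtol = atol = 1e-3)
# against the PyTorch-generated summary ids.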
def __magic_name__ ( ) -> Any:
_lowercase : Dict = parse_args()
_lowercase : Union[str, Any] = 5
_lowercase : Union[str, Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowercase : Optional[Any] = torch.device(args.device )
_lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(SCREAMING_SNAKE_CASE )
if args.max_length:
_lowercase : Any = args.max_length
if args.num_beams:
_lowercase : List[str] = args.num_beams
if args.output_file_path:
_lowercase : Union[str, Any] = args.output_file_path
else:
_lowercase : Tuple = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : List[str] = self.prepare_config_and_inputs()
        ((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : int = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def __a ( self ):
_lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowercase : List[str] = model(_lowerCAmelCase )[0]
_lowercase : str = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : List[Any] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
return " ".join(
''.join(word[::-1] ) if len(SCREAMING_SNAKE_CASE ) > 4 else word for word in sentence.split() )
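# Editorial note: judging from the example print at the bottom of the file, the intended
# behaviour is to reverse only words longer than four characters, so "Hey wollef sroirraw"
# should come back as "Hey fellow warriors" ("Hey" is left untouched).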
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 677 |
import qiskit
def single_qubit_measure ( qubits , classical_bits ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
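# With X applied to both qubits, every one of the 1_000 noiseless-simulator shots should land on the '11' state.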
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 677 | 1 |
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main( ) -> None:
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda comment : comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 677 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    # Note: the head masks computed above are not part of the returned dict.
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
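        # The cache is pre-allocated for max_decoder_length steps: all but the last token are decoded in one
        # pass below, and the final token is then fed separately against the cached key/values.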
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
UpperCamelCase = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
_UpperCamelCase : Any = DistilBertTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars
):
_lowercase : List[str] = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) )
_lowercase : Optional[int] = do_lower_case
_lowercase : Union[str, Any] = strip_accents
_lowercase : Any = tokenize_chinese_chars
_lowercase : List[Any] = normalizer_class(**_lowerCAmelCase )
_lowercase : Dict = do_lower_case
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowercase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Optional[int] = [self.sep_token_id]
_lowercase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Union[str, Any] = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
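# Usage sketch for the fast DistilBERT tokenizer defined above (checkpoint name taken from the pretrained map):
#   tokenizer = lowerCAmelCase_.from_pretrained('distilbert-base-uncased')
#   tokenizer('hello world')  # returns input_ids wrapped in [CLS] ... [SEP] plus an attention_mask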
| 677 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = True
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
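        # Longformer expects a global_attention_mask input in addition to the usual input_ids/attention_mask.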
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
_lowercase : Optional[int] = super().outputs
if self.task == "default":
_lowercase : List[str] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
_lowercase : int = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1
return inputs
| 677 | 1 |
import numpy as np
def sigmoid ( SCREAMING_SNAKE_CASE ) -> np.array:
    return 1 / (1 + np.exp(-SCREAMING_SNAKE_CASE ))
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> np.array:
    # Sigmoid linear unit / "quick GELU": x * sigmoid(1.702 * x).
    return SCREAMING_SNAKE_CASE * sigmoid(1.702 * SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
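    # A collection contains only unique elements exactly when converting it to a set loses nothing.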
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_ ( ABC ):
@staticmethod
@abstractmethod
def __a ( _lowerCAmelCase ):
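        # Concrete commands attach their own sub-parser to the given ArgumentParser here.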
raise NotImplementedError()
@abstractmethod
def __a ( self ):
raise NotImplementedError()
| 677 |
import math
def insertion_sort( array , start = 0 , end = 0 ) -> list:
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify( array , index , heap_size ) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort( array ) -> list:
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array , 0 , i )
    return array
def median_of_a( array , first_index , middle_index , last_index ) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition( array , low , high , pivot ) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort( array ) -> list:
    # Introsort: quicksort with a recursion-depth budget of 2 * ceil(log2(n)); falls back to heapsort
    # when the budget is exhausted and to insertion sort for slices of at most 16 elements.
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort( array , start , end , size_threshold , max_depth ) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_a(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 677 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"facebook/nllb-large-en-ro": 1_024,
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
UpperCamelCase = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = ["input_ids", "attention_mask"]
_UpperCamelCase : Union[str, Any] = NllbTokenizer
_UpperCamelCase : List[int] = []
_UpperCamelCase : List[int] = []
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=False , **_lowerCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowercase : Any = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
_lowercase : Any = legacy_behaviour
super().__init__(
vocab_file=_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , legacy_behaviour=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Tuple = vocab_file
_lowercase : Optional[int] = False if not self.vocab_file else True
_lowercase : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowercase : Dict = {
lang_code: self.convert_tokens_to_ids(_lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowercase : Any = src_lang if src_lang is not None else 'eng_Latn'
_lowercase : Any = self.convert_tokens_to_ids(self._src_lang )
_lowercase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __a ( self ):
return self._src_lang
@src_lang.setter
def __a ( self , _lowerCAmelCase ):
_lowercase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Union[str, Any] = [self.sep_token_id]
_lowercase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowercase : Dict = src_lang
_lowercase : Any = self(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Any = self.convert_tokens_to_ids(_lowerCAmelCase )
_lowercase : List[Any] = tgt_lang_id
return inputs
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = "eng_Latn" , _lowerCAmelCase = None , _lowerCAmelCase = "fra_Latn" , **_lowerCAmelCase , ):
_lowercase : Dict = src_lang
_lowercase : Dict = tgt_lang
return super().prepare_seqaseq_batch(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def __a ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __a ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __a ( self , _lowerCAmelCase ):
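        # Non-legacy behaviour prefixes the source text with the language code; legacy mode appends it after </s>.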
_lowercase : Optional[Any] = self.convert_tokens_to_ids(_lowerCAmelCase )
if self.legacy_behaviour:
_lowercase : Optional[Any] = []
_lowercase : str = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase : int = [self.cur_lang_code]
_lowercase : Optional[int] = [self.eos_token_id]
_lowercase : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
_lowercase : int = self.convert_ids_to_tokens(self.suffix_tokens )
_lowercase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __a ( self , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.convert_tokens_to_ids(_lowerCAmelCase )
if self.legacy_behaviour:
_lowercase : str = []
_lowercase : int = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase : Optional[Any] = [self.cur_lang_code]
_lowercase : Optional[Any] = [self.eos_token_id]
_lowercase : int = self.convert_ids_to_tokens(self.prefix_tokens )
_lowercase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowercase : int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_lowercase : Union[str, Any] = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
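# Usage sketch for the fast NLLB tokenizer defined above (checkpoint name taken from the pretrained map):
#   tokenizer = lowerCAmelCase_.from_pretrained('facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='fra_Latn')
#   tokenizer('Hello world')  # by default the eng_Latn code is prefixed and </s> appended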
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list:
_lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Dict = i
_lowercase : str = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowercase : Optional[Any] = array[temp_index - 1]
temp_index -= 1
_lowercase : Optional[Any] = temp_index_value
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap
_lowercase : List[str] = index
_lowercase : List[str] = 2 * index + 1 # Left Node
_lowercase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowercase : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowercase : str = right_index
if largest != index:
_lowercase , _lowercase : List[str] = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_lowercase , _lowercase : List[Any] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Optional[Any] = low
_lowercase : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_lowercase , _lowercase : Tuple = array[j], array[i]
i += 1
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
_lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) )
_lowercase : str = 16
return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE )
max_depth -= 1
_lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = p
return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 677 |
from collections.abc import Sequence
def evaluate_poly ( poly , x ) -> float:
    # Direct evaluation: sum of c_i * x**i over all coefficients.
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner ( poly , x ) -> float:
    # Horner's scheme: one multiplication and one addition per coefficient.
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 677 | 1 |
def euclidean_distance_sqr( pointa , pointb ) -> float:
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def column_based_sort( array , column=0 ) -> list:
    return sorted(array , key=lambda point : point[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('inf' ) ) -> float:
    # Brute force over every pair; only used for small point sets.
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('inf' ) ) -> float:
    # Inside the strip, only a bounded number of neighbours can be closer than min_dis.
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ) -> float:
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 677 |
from __future__ import annotations
class Node :
    def __init__( self , _lowerCAmelCase=None ):
        self.data = _lowerCAmelCase
        self.next = None
    def __repr__( self ):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F"""{temp.data}""" )
            temp = temp.next
        return "->".join(string_rep )
def make_linked_list( SCREAMING_SNAKE_CASE ) -> Node:
    if not SCREAMING_SNAKE_CASE:
        raise Exception('The Elements List is empty' )
    current = head = Node(SCREAMING_SNAKE_CASE[0] )
    for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
        current.next = Node(SCREAMING_SNAKE_CASE[i] )
        current = current.next
    return head
def print_reverse( SCREAMING_SNAKE_CASE ) -> None:
    # Recurse to the tail first so the node data is printed back-to-front.
    if SCREAMING_SNAKE_CASE is not None and isinstance(SCREAMING_SNAKE_CASE , Node ):
        print_reverse(SCREAMING_SNAKE_CASE.next )
        print(SCREAMING_SNAKE_CASE.data )
def main( ) -> None:
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print('Linked List:' )
    print(linked_list )
    print('Elements in Reverse:' )
    print_reverse(linked_list )
if __name__ == "__main__":
main()
| 677 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args( SCREAMING_SNAKE_CASE ) -> dict:
    return {key.lstrip('-' ): value for key, value in zip(SCREAMING_SNAKE_CASE[::2] , SCREAMING_SNAKE_CASE[1::2] )}
def main( ) -> None:
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 677 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance( vector_a , vector_b ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def euclidean_distance_no_np( vector_a , vector_b ) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
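# Both implementations compute the same L2 distance; the benchmark below only compares their speed.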
if __name__ == "__main__":
def __magic_name__ ( ) -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
| 677 | 1 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution( SCREAMING_SNAKE_CASE = N ) -> int:
    # Greatest product of thirteen adjacent digits in the digit string N (Project Euler problem 8).
    largest_product = -sys.maxsize - 1
    for i in range(len(SCREAMING_SNAKE_CASE ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(SCREAMING_SNAKE_CASE[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCamelCase = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ElectraTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars
):
_lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) )
_lowercase : Dict = do_lower_case
_lowercase : Optional[Any] = strip_accents
_lowercase : Any = tokenize_chinese_chars
_lowercase : Tuple = normalizer_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = do_lower_case
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : str = [self.sep_token_id]
_lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
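# Hedged illustration (not part of the tokenizer file): the sequence-pair layout produced by the
# two helpers above -- [CLS] A [SEP] B [SEP] with 0/1 segment ids. The cls/sep ids and token ids
# below are toy values chosen only for demonstration.
_cls_id, _sep_id = 101, 102
_a, _b = [7, 8], [9]
assert [_cls_id] + _a + [_sep_id] + _b + [_sep_id] == [101, 7, 8, 102, 9, 102]
assert [0] * (len(_a) + 2) + [1] * (len(_b) + 1) == [0, 0, 0, 0, 1, 1]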
| 677 | 1 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : torch.FloatTensor
_UpperCamelCase : Optional[torch.FloatTensor] = None
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.999 , SCREAMING_SNAKE_CASE="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowercase : Optional[int] = []
for i in range(SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = i / num_diffusion_timesteps
_lowercase : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE ) / alpha_bar_fn(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) )
return torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa )
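# Hedged illustration (not from the original file): the "squaredcos_cap_v2" / Glide cosine schedule
# above derives each beta from the ratio of consecutive cosine-squared alpha-bar values. The toy
# helper below recomputes a 5-step schedule with plain math (it relies only on the `math` import at
# the top of this file); its name is illustrative.
def _toy_cosine_betas(num_steps: int = 5, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

# _toy_cosine_betas() returns five small betas that grow toward the end of the schedule.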
class lowerCAmelCase_ ( __snake_case , __snake_case ):
_UpperCamelCase : Optional[int] = 1
@register_to_config
def __init__( self , _lowerCAmelCase = 1_0_0_0 , _lowerCAmelCase = 0.00_01 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ):
if kwargs.get('set_alpha_to_one' , _lowerCAmelCase ) is not None:
_lowercase : Tuple = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
_lowercase : Optional[int] = kwargs['set_alpha_to_one']
if trained_betas is not None:
_lowercase : List[Any] = torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : int = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Tuple = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Union[str, Any] = betas_for_alpha_bar(_lowerCAmelCase )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_lowercase : Optional[int] = 1.0 - self.betas
_lowercase : List[str] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowercase : Tuple = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowercase : Optional[int] = 1.0
# setable values
_lowercase : str = None
_lowercase : str = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
return sample
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
F""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
F""" maximal {self.config.num_train_timesteps} timesteps.""" )
_lowercase : Optional[Any] = num_inference_steps
_lowercase : Optional[Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
_lowercase : Optional[Any] = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
self.timesteps += self.config.steps_offset
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
# 1. get previous step value (=t+1)
_lowercase : Union[str, Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowercase : List[str] = self.alphas_cumprod[timestep]
_lowercase : int = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowercase : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowercase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowercase : int = model_output
elif self.config.prediction_type == "sample":
_lowercase : Optional[int] = model_output
_lowercase : Optional[int] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowercase : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowercase : Union[str, Any] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowercase : List[str] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : List[str] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def __len__( self ):
return self.config.num_train_timesteps
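# Hedged sketch (not from the original file): the core inverted-DDIM update that `step` above
# performs for the default epsilon prediction type, reduced to plain floats for one timestep.
# All names are illustrative; the alpha_prod values would normally come from `alphas_cumprod`.
def _ddim_inverse_update(sample: float, model_output: float,
                         alpha_prod_t: float, alpha_prod_t_next: float) -> float:
    # "predicted x_0" of formula (12) in the DDIM paper, epsilon parameterization
    pred_x0 = (sample - (1 - alpha_prod_t) ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # direction pointing to x_{t+1}, then the deterministic (eta = 0) update
    direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
    return alpha_prod_t_next ** 0.5 * pred_x0 + direction

# e.g. _ddim_inverse_update(0.5, 0.1, 0.99, 0.98)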
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
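# Hedged sketch (not part of the file): how the lazy-import pattern above behaves at runtime.
# The module object is replaced by a _LazyModule, so e.g. BlenderbotModel is only imported on
# first attribute access, while the TYPE_CHECKING branch keeps static analyzers accurate. The toy
# class below illustrates the idea only; it is not the real _LazyModule implementation.
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure  # {"submodule": ["SymbolName", ...]}

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")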
| 677 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[Any] = "vit_msn"
def __init__( self , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase=3_0_7_2 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=2_2_4 , _lowerCAmelCase=1_6 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
_lowercase : Any = hidden_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Any = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Optional[Any] = initializer_range
_lowercase : Dict = layer_norm_eps
_lowercase : Optional[int] = image_size
_lowercase : Any = patch_size
_lowercase : List[Any] = num_channels
_lowercase : Union[str, Any] = qkv_bias
| 677 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
for attribute in key.split('.' ):
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
_lowercase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowercase : List[str] = value
elif weight_type == "weight_g":
_lowercase : Any = value
elif weight_type == "weight_v":
_lowercase : Tuple = value
elif weight_type == "bias":
_lowercase : List[str] = value
else:
_lowercase : Dict = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Optional[int] = []
_lowercase : Optional[int] = fairseq_model.state_dict()
_lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowercase : Dict = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
_lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
_lowercase : Union[str, Any] = True
if "*" in mapped_key:
_lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowercase : Optional[int] = 'weight_g'
elif "weight_v" in name:
_lowercase : Optional[Any] = 'weight_v'
elif "weight" in name:
_lowercase : str = 'weight'
elif "bias" in name:
_lowercase : Any = 'bias'
else:
_lowercase : str = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Any = full_name.split('conv_layers.' )[-1]
_lowercase : Any = name.split('.' )
_lowercase : Optional[Any] = int(items[0] )
_lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowercase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowercase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowercase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowercase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
if config_path is not None:
_lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
_lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowercase : Dict = target_dict.pad_index
_lowercase : Dict = target_dict.bos_index
_lowercase : Tuple = target_dict.eos_index
_lowercase : List[Any] = len(target_dict.symbols )
_lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
_lowercase : int = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
_lowercase : str = True if config.feat_extract_norm == 'layer' else False
_lowercase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
_lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
_lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowercase : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> float:
_lowercase : str = 0
while len(SCREAMING_SNAKE_CASE ) > 1:
_lowercase : List[str] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
_lowercase : Optional[int] = files.index(min(SCREAMING_SNAKE_CASE ) )
temp += files[min_index]
files.pop(SCREAMING_SNAKE_CASE )
files.append(SCREAMING_SNAKE_CASE )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
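# Hedged alternative sketch (not from the original file): the same optimal-merge cost computed
# with a min-heap, the usual O(n log n) formulation of the optimal merge pattern. The helper name
# is illustrative.
import heapq

def _optimal_merge_cost(sizes: list) -> int:
    heap = list(sizes)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        total += merged
        heapq.heappush(heap, merged)
    return total

# e.g. _optimal_merge_cost([2, 3, 4]) -> 14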
| 677 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Dict = bbox[i, j, 0]
_lowercase : int = t
_lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase )
_lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = 10
def __a ( self ):
_lowercase : Optional[int] = TFLayoutLMModelTester(self )
_lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __a ( self ):
pass
def __magic_name__ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def __a ( self ):
# initialize model with randomly initialized sequence classification head
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : List[Any] = outputs.loss
_lowercase : Any = (2,)
self.assertEqual(loss.shape , _lowerCAmelCase )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Dict = (2, 2)
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Dict = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
        # initialize model with randomly initialized question answering head
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Any = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
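# Hedged sketch (not part of the test file): the "ensure that bbox is legal" loop in
# `prepare_config_and_inputs` above simply swaps coordinates so that x1 >= x0 and y1 >= y0 for
# every box; the same repair can be done in one vectorized pass. The helper name is illustrative
# and it reuses the `np` import at the top of this test file.
def _legalize_bboxes(bbox):
    fixed = bbox.copy()
    fixed[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)  # enforce x0 <= x1
    fixed[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)  # enforce y0 <= y1
    return fixed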
| 677 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[str] = logging.get_logger()
# the current default level is logging.WARNING
_lowercase : Union[str, Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = logging.get_verbosity()
_lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : Tuple = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase )
_lowercase : Optional[Any] = logging.log_levels[env_level_str]
_lowercase : Dict = logging.get_verbosity()
self.assertEqual(
_lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_lowercase : Any = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_lowercase : Tuple = logging.logging.getLogger()
with CaptureLogger(_lowerCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __a ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : List[str] = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def __magic_name__ ( ) -> List[str]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 677 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[str] = logging.get_logger()
# the current default level is logging.WARNING
_lowercase : Union[str, Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = logging.get_verbosity()
_lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : Tuple = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase )
_lowercase : Optional[Any] = logging.log_levels[env_level_str]
_lowercase : Dict = logging.get_verbosity()
self.assertEqual(
_lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_lowercase : Any = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_lowercase : Tuple = logging.logging.getLogger()
with CaptureLogger(_lowerCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __a ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : List[str] = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def __magic_name__ ( ) -> List[str]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
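# Hedged usage sketch (not part of the test file): the verbosity round-trip the tests above rely
# on, using the public helpers from transformers.utils.logging.
def _quiet_transformers_demo():
    from transformers.utils import logging as hf_logging

    previous = hf_logging.get_verbosity()
    hf_logging.set_verbosity_error()  # silence warnings from every transformers.* logger
    try:
        pass  # run noisy code here
    finally:
        hf_logging.set_verbosity(previous)  # restore the original level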
| 677 | 1 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
_lowercase : List[Any] = order
# a_{0} ... a_{k}
_lowercase : List[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_lowercase : Tuple = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_lowercase : Any = [0.0] * self.order
# y[n-1] ... y[n-k]
_lowercase : int = [0.0] * self.order
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if len(_lowerCAmelCase ) < self.order:
_lowercase : str = [1.0, *a_coeffs]
if len(_lowerCAmelCase ) != self.order + 1:
_lowercase : Any = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
if len(_lowerCAmelCase ) != self.order + 1:
_lowercase : Optional[int] = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
_lowercase : List[str] = a_coeffs
_lowercase : str = b_coeffs
def __a ( self , _lowerCAmelCase ):
_lowercase : Any = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_lowercase : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_lowercase : int = self.input_history[:-1]
_lowercase : List[Any] = self.output_history[:-1]
_lowercase : Optional[Any] = sample
_lowercase : int = result
return result
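# Hedged illustration (not part of the class above): the direct-form-I difference equation that
# the per-sample processing method evaluates, written as a standalone function. All names are
# illustrative; histories are stored newest-first, exactly as in the class.
def _iir_step(sample, a_coeffs, b_coeffs, x_hist, y_hist):
    # y[n] = (b0 * x[n] + sum_{i>=1} (b_i * x[n-i] - a_i * y[n-i])) / a0
    acc = sum(
        b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1]
        for i in range(1, len(a_coeffs))
    )
    result = (acc + b_coeffs[0] * sample) / a_coeffs[0]
    x_hist[:] = [sample] + x_hist[:-1]   # shift input history
    y_hist[:] = [result] + y_hist[:-1]   # shift output history
    return result

# With a_coeffs = b_coeffs = [1.0, 0.0, 0.0] the filter is an identity: each output equals its input.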
| 677 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase = "pt"
elif is_tf_available():
UpperCamelCase = "tf"
else:
UpperCamelCase = "jax"
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = PerceiverTokenizer
_UpperCamelCase : str = False
def __a ( self ):
super().setUp()
_lowercase : List[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def __a ( self , **_lowerCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowercase : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
try:
_lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) )
_lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) )
if max_length is not None and len(_lowerCAmelCase ) > max_length:
_lowercase : Any = toks[:max_length]
if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0:
while len(_lowerCAmelCase ) < min_length:
_lowercase : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Optional[Any] = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
if " " not in output_txt and len(_lowerCAmelCase ) > 1:
_lowercase : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase )
)
if with_prefix_space:
_lowercase : List[Any] = ' ' + output_txt
_lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
return output_txt, output_ids
def __a ( self ):
_lowercase : Dict = self.perceiver_tokenizer
_lowercase : Optional[Any] = 'Unicode €.'
_lowercase : str = tokenizer(_lowerCAmelCase )
_lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' )
_lowercase : Union[str, Any] = tokenizer('e è é ê ë' )
_lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : int = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def __a ( self ):
_lowercase : List[str] = self.perceiver_tokenizer
_lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
_lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
if FRAMEWORK != "jax":
_lowercase : int = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def __a ( self ):
_lowercase : List[Any] = self.perceiver_tokenizer
_lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _lowerCAmelCase )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertNotIn('decoder_input_ids' , _lowerCAmelCase )
self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.perceiver_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : Optional[int] = tokenizer(
text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def __a ( self ):
# safety check on max_len default value so we are sure the test works
_lowercase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
_lowercase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[str] = tempfile.mkdtemp()
_lowercase : int = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
_lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : List[str] = json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(_lowerCAmelCase )
_lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )]
_lowercase : str = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[int] = tokenizer_class.from_pretrained(
_lowerCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __a ( self ):
_lowercase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
_lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
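# --- Illustrative sketch (not part of the test class above) ---
# The Perceiver tokenizer is byte-level: a handful of ids are reserved for special tokens and every
# remaining id maps to one raw UTF-8 byte. Below is a minimal stand-alone restatement of the decode
# behaviour checked above; the offset of 6 special tokens is an assumption made for the illustration,
# not a value read from this file.
def _toy_byte_decode(ids, num_special_tokens=6):
    # Drop special-token ids, shift the rest back into the 0-255 byte range and decode as UTF-8,
    # replacing invalid sequences with U+FFFD -- which is why decode([178]) above yields "�".
    raw = bytes(i - num_special_tokens for i in ids if i >= num_special_tokens)
    return raw.decode("utf-8", errors="replace")

assert _toy_byte_decode([178]) == "�"  # 178 - 6 = 172 is a lone UTF-8 continuation byte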
| 677 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Tuple = create_tensor(SCREAMING_SNAKE_CASE )
_lowercase : Dict = gather(SCREAMING_SNAKE_CASE )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Optional[Any] = [state.process_index]
_lowercase : List[str] = gather_object(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == state.num_processes, F"""{gathered_obj}, {len(SCREAMING_SNAKE_CASE )} != {state.num_processes}"""
assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Optional[Any] = create_tensor(SCREAMING_SNAKE_CASE )
_lowercase : str = broadcast(SCREAMING_SNAKE_CASE )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
    # We give the main process one extra element so the per-process tensors have different
    # lengths and the padding logic is actually exercised
if state.is_main_process:
_lowercase : List[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_lowercase : Dict = torch.arange(state.num_processes ).to(state.device )
_lowercase : Optional[int] = pad_across_processes(SCREAMING_SNAKE_CASE )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
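# --- Illustrative sketch ---
# What the padding above achieves, simulated locally: each "process" contributes a tensor of a
# possibly different length, and every tensor is right-padded with a pad value up to the longest
# length so a later gather/stack cannot fail on shape mismatches. This is a stand-alone restatement
# of the behaviour, not the accelerate implementation itself.
def _toy_pad_across_processes(per_process_tensors, pad_index=0):
    max_len = max(t.shape[0] for t in per_process_tensors)
    padded = []
    for t in per_process_tensors:
        out = t.new_full((max_len,), pad_index)
        out[: t.shape[0]] = t
        padded.append(out)
    return padded

assert [t.tolist() for t in _toy_pad_across_processes([torch.arange(3), torch.arange(2)])] == [
    [0, 1, 2],
    [0, 1, 0],
]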
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    # For now, this test only runs on two processes
if state.num_processes != 2:
return
_lowercase : int = create_tensor(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = reduce(SCREAMING_SNAKE_CASE , 'sum' )
_lowercase : Any = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), F"""{reduced_tensor} != {truth_tensor}"""
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
    # For now, this test only runs on two processes
if state.num_processes != 2:
return
_lowercase : Union[str, Any] = create_tensor(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = reduce(SCREAMING_SNAKE_CASE , 'mean' )
_lowercase : Dict = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), F"""{reduced_tensor} != {truth_tensor}"""
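# --- Illustrative sketch ---
# With two processes, the per-process tensor built at the top of this file is [1., 2.] on rank 0
# and [3., 4.] on rank 1, so an element-wise "sum" reduction yields [4., 6.] and a "mean" reduction
# yields [2., 3.] -- exactly the two truth tensors asserted above. The same arithmetic, restated
# locally without any distributed setup:
_rank0_tensor, _rank1_tensor = torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0])
assert torch.equal(_rank0_tensor + _rank1_tensor, torch.tensor([4.0, 6.0]))
assert torch.equal((_rank0_tensor + _rank1_tensor) / 2, torch.tensor([2.0, 3.0]))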
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
# For xla_spawn (TPUs)
main()
def __magic_name__ ( ) -> List[Any]:
_lowercase : str = PartialState()
state.print(F"""State: {state}""" )
state.print('testing gather' )
test_gather(SCREAMING_SNAKE_CASE )
state.print('testing gather_object' )
test_gather_object(SCREAMING_SNAKE_CASE )
state.print('testing broadcast' )
test_broadcast(SCREAMING_SNAKE_CASE )
state.print('testing pad_across_processes' )
test_pad_across_processes(SCREAMING_SNAKE_CASE )
state.print('testing reduce_sum' )
test_reduce_sum(SCREAMING_SNAKE_CASE )
state.print('testing reduce_mean' )
test_reduce_mean(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
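# --- Illustrative sketch ---
# The try/except blocks above follow a simple recipe: probe each optional backend once and only
# advertise the symbols whose backend is actually installed. The same idea without the
# transformers-specific machinery; the module and symbol names below are placeholders chosen for
# the illustration:
import importlib.util

def _toy_backend_available(module_name):
    # A backend counts as available when its top-level module can be located on the import path.
    return importlib.util.find_spec(module_name) is not None

_toy_import_structure = {"configuration_conditional_detr": ["ConditionalDetrConfig"]}
if _toy_backend_available("torch"):
    _toy_import_structure["modeling_conditional_detr"] = ["ConditionalDetrModel"]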
| 677 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ):
_lowercase : Optional[Any] = {}
_lowercase : Any = {}
if prompt is not None:
_lowercase : Dict = prompt
if generate_kwargs is not None:
_lowercase : Optional[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowercase : List[str] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
_lowercase : Union[str, Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowercase : Optional[int] = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
'Note also that one single text can be provided for conditional image to text generation.' )
_lowercase : Any = self.model.config.model_type
if model_type == "git":
_lowercase : Optional[Any] = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
_lowercase : Union[str, Any] = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
_lowercase : List[Any] = [self.tokenizer.cls_token_id] + input_ids
_lowercase : Optional[int] = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
_lowercase : Optional[int] = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowercase : Optional[int] = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
_lowercase : Optional[int] = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
_lowercase : str = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowercase : int = None
return model_inputs
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _lowerCAmelCase )
and all(x is None for x in model_inputs['input_ids'] )
):
_lowercase : Tuple = None
if generate_kwargs is None:
_lowercase : Any = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowercase : Any = model_inputs.pop(self.model.main_input_name )
_lowercase : Optional[int] = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def __a ( self , _lowerCAmelCase ):
_lowercase : Tuple = []
for output_ids in model_outputs:
_lowercase : Any = {
'generated_text': self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
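# --- Illustrative sketch ---
# The guard at the top of the forward pass, in words: when `preprocess` stored `input_ids=None`
# for every sample, batching turns that field into a list of Nones that must be dropped before
# calling `generate`. A minimal, framework-free restatement of that check:
def _toy_should_drop_input_ids(model_inputs):
    ids = model_inputs.get("input_ids")
    return isinstance(ids, list) and all(x is None for x in ids)

assert _toy_should_drop_input_ids({"input_ids": [None, None]}) is True
assert _toy_should_drop_input_ids({"input_ids": [[101, 2023]]}) is False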
| 677 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Tuple = "ClapFeatureExtractor"
_UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
_lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowercase : Any = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowercase : Union[str, Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
_lowercase : Dict = self.tokenizer.model_input_names
_lowercase : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
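# --- Illustrative check ---
# The property above merges the tokenizer's and the feature extractor's input names while keeping
# their order and dropping duplicates; `dict.fromkeys` is the idiomatic order-preserving
# de-duplication. The concrete names below are examples for the illustration, not values read from
# this file:
_toy_text_names = ["input_ids", "attention_mask"]
_toy_audio_names = ["input_features", "attention_mask"]
assert list(dict.fromkeys(_toy_text_names + _toy_audio_names)) == [
    "input_ids",
    "attention_mask",
    "input_features",
]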
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
_lowercase : Union[str, Any] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __magic_name__ ( SCREAMING_SNAKE_CASE = 5_000 ) -> int:
_lowercase : int = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
_lowercase : Any = pentagonal_nums[j]
_lowercase : int = pentagonal_i + pentagonal_j
_lowercase : Tuple = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
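# --- Worked check (illustrative, self-contained) ---
# The helper above inverts P(n) = n * (3n - 1) / 2: solving 3n^2 - n - 2x = 0 for n gives
# n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly when that n is a positive integer.
# A stand-alone restatement of that test, checked against the first few pentagonal numbers:
def _toy_is_pentagonal(x: int) -> bool:
    n = (1 + (1 + 24 * x) ** 0.5) / 6
    return n == int(n)

assert all(_toy_is_pentagonal(k * (3 * k - 1) // 2) for k in range(1, 10))
assert not _toy_is_pentagonal(6)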
if __name__ == "__main__":
print(f'''{solution() = }''')
| 677 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
_lowercase : Any = num_of_nodes
_lowercase : list[list[int]] = []
_lowercase : dict[int, int] = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
self.m_edges.append([u_node, v_node, weight] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowercase : Optional[int] = self.find_component(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if component_size[u_node] <= component_size[v_node]:
_lowercase : str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
_lowercase : Any = self.find_component(_lowerCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = []
_lowercase : Optional[Any] = 0
_lowercase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowercase : str = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowercase , _lowercase , _lowercase : List[str] = edge
_lowercase : Union[str, Any] = self.m_component[u]
_lowercase : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowercase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase , _lowercase , _lowercase : int = edge
_lowercase : Optional[int] = self.m_component[u]
_lowercase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
_lowercase : str = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __magic_name__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class lowerCAmelCase_ ( __snake_case ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_UpperCamelCase : str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
_UpperCamelCase : ClassVar[Features] = Features({"text": Value("string" )} )
_UpperCamelCase : ClassVar[Features] = Features({"summary": Value("string" )} )
_UpperCamelCase : str = "text"
_UpperCamelCase : str = "summary"
@property
def __a ( self ):
return {self.text_column: "text", self.summary_column: "summary"}
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Tuple = {}
_lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids']
_lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] )
return output
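# --- Illustrative sketch ---
# The map function above returns, per example, the token ids plus a characters-per-token ratio
# that is handy for spotting poorly tokenized samples. A stand-alone restatement with a stand-in
# "tokenizer" (a whitespace split), purely to show the shape of the output; the field names mirror
# the ones used above:
def _toy_tokenize(example):
    ids = example["content"].split()  # stand-in for tokenizer(...)["input_ids"]
    return {"input_ids": ids, "ratio_char_token": len(example["content"]) / len(ids)}

assert _toy_tokenize({"content": "def f(): return 1"})["ratio_char_token"] > 1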
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 677 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : List[str] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : int = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def __a ( self ):
_lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowercase : List[str] = model(_lowerCAmelCase )[0]
_lowercase : str = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : List[Any] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
| 677 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration}
UpperCamelCase = {"facebook/bart-base": BartTokenizer}
def __magic_name__ ( ) -> str:
_lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' )
_lowercase : Optional[Any] = parser.parse_args()
return args
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]:
_lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
_lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
_lowercase : Dict = 0
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
model.eval()
_lowercase : List[Any] = None
_lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) )
with torch.no_grad():
_lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.'
_lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_lowercase : str = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
SCREAMING_SNAKE_CASE , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=SCREAMING_SNAKE_CASE , )
logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) )
logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = ort_sess.run(
SCREAMING_SNAKE_CASE , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(SCREAMING_SNAKE_CASE ),
'max_length': np.array(SCREAMING_SNAKE_CASE ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
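# --- Illustrative sketch ---
# The function above follows the usual "export, reload, check parity" recipe. The same recipe on a
# deliberately tiny torch module (no BART, no beam search), just to show the moving parts; the file
# name and tolerances below are arbitrary choices for the illustration, not values taken from this
# script:
def _toy_export_and_validate(path="toy.onnx"):
    module = torch.nn.Linear(4, 2)
    module.eval()
    example = torch.randn(1, 4)
    # Export the module to ONNX, then run the exported graph and compare against eager PyTorch.
    torch.onnx.export(module, (example,), path, input_names=["x"], output_names=["y"])
    session = onnxruntime.InferenceSession(path)
    (onnx_out,) = session.run(None, {"x": example.numpy()})
    np.testing.assert_allclose(module(example).detach().numpy(), onnx_out, rtol=1e-3, atol=1e-5)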
def __magic_name__ ( ) -> Any:
_lowercase : Dict = parse_args()
_lowercase : Union[str, Any] = 5
_lowercase : Union[str, Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowercase : Optional[Any] = torch.device(args.device )
_lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(SCREAMING_SNAKE_CASE )
if args.max_length:
_lowercase : Any = args.max_length
if args.num_beams:
_lowercase : List[str] = args.num_beams
if args.output_file_path:
_lowercase : Union[str, Any] = args.output_file_path
else:
_lowercase : Tuple = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["DPTFeatureExtractor"]
UpperCamelCase = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : List[str] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : int = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def __a ( self ):
_lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowercase : List[str] = model(_lowerCAmelCase )[0]
_lowercase : str = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : List[Any] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
| 677 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Union[List[PIL.Image.Image], np.ndarray]
_UpperCamelCase : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 677 |
import qiskit
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> qiskit.result.counts.Counts:
_lowercase : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_lowercase : Optional[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_lowercase : Optional[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE )
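# --- Illustrative sketch (no simulator required) ---
# Applying an X gate to both qubits maps |00> deterministically to |11>, so every one of the 1000
# shots above should land in the "11" bin, i.e. the returned histogram is expected to be
# {"11": 1000}. The same statement as a two-line classical bit-flip model:
def _toy_double_x(bits=(0, 0)):
    return tuple(b ^ 1 for b in bits)

assert _toy_double_x() == (1, 1)  # hence an expected histogram of {"11": 1000}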
if __name__ == "__main__":
UpperCamelCase = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 677 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(
__snake_case , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class lowerCAmelCase_ ( __snake_case ):
def __a ( self , _lowerCAmelCase ):
if self.framework == "tf":
_lowercase : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_lowercase : Union[str, Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCAmelCase )
else:
raise ValueError('Unsupported framework' )
return masked_index
def __a ( self , _lowerCAmelCase ):
_lowercase : List[Any] = self.get_masked_index(_lowerCAmelCase )
_lowercase : Optional[int] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __a ( self , _lowerCAmelCase ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
if return_tensors is None:
_lowercase : str = self.framework
_lowercase : Union[str, Any] = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.ensure_exactly_one_mask_token(_lowerCAmelCase )
return model_inputs
def __a ( self , _lowerCAmelCase ):
_lowercase : Union[str, Any] = self.model(**_lowerCAmelCase )
_lowercase : List[str] = model_inputs['input_ids']
return model_outputs
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=5 , _lowerCAmelCase=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_lowercase : Union[str, Any] = target_ids.shape[0]
_lowercase : List[Any] = model_outputs['input_ids'][0]
_lowercase : Tuple = model_outputs['logits']
if self.framework == "tf":
_lowercase : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_lowercase : Optional[int] = outputs.numpy()
_lowercase : Any = outputs[0, masked_index, :]
_lowercase : List[Any] = stable_softmax(_lowerCAmelCase , axis=-1 )
if target_ids is not None:
_lowercase : List[Any] = tf.gather_nd(tf.squeeze(_lowerCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
_lowercase : Optional[Any] = tf.expand_dims(_lowerCAmelCase , 0 )
_lowercase : Optional[int] = tf.math.top_k(_lowerCAmelCase , k=_lowerCAmelCase )
_lowercase , _lowercase : int = topk.values.numpy(), topk.indices.numpy()
else:
_lowercase : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_lowercase : List[Any] = outputs[0, masked_index, :]
_lowercase : Tuple = logits.softmax(dim=-1 )
if target_ids is not None:
_lowercase : int = probs[..., target_ids]
_lowercase , _lowercase : int = probs.topk(_lowerCAmelCase )
_lowercase : Dict = []
_lowercase : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_lowercase : int = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_lowercase : int = input_ids.numpy().copy()
if target_ids is not None:
_lowercase : Union[str, Any] = target_ids[p].tolist()
_lowercase : List[Any] = p
# Filter padding out:
_lowercase : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_lowercase : Union[str, Any] = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
_lowercase : str = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(_lowerCAmelCase )
result.append(_lowerCAmelCase )
if single_mask:
return result[0]
return result
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = [targets]
try:
_lowercase : str = self.tokenizer.get_vocab()
except Exception:
_lowercase : str = {}
_lowercase : Dict = []
for target in targets:
_lowercase : List[str] = vocab.get(_lowerCAmelCase , _lowerCAmelCase )
if id_ is None:
_lowercase : List[str] = self.tokenizer(
_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , max_length=1 , truncation=_lowerCAmelCase , )['input_ids']
if len(_lowerCAmelCase ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'We cannot replace it with anything meaningful, ignoring it' )
continue
_lowercase : List[str] = input_ids[0]
# XXX: If users hit this code path, tokenizing each missing target is
# pretty slow, so make sure the warning lets them fix the input and
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_lowercase : Any = list(set(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) == 0:
raise ValueError('At least one target must be provided when passed.' )
_lowercase : Optional[int] = np.array(_lowerCAmelCase )
return target_ids
def __a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None ):
_lowercase : Any = {}
if targets is not None:
_lowercase : Optional[Any] = self.get_target_ids(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = target_ids
if top_k is not None:
_lowercase : Optional[int] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ):
_lowercase : Dict = super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) == 1:
return outputs[0]
return outputs
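# Hedged usage sketch of the pipeline above: the public entry point in transformers is
# pipeline('fill-mask', ...); the checkpoint name and outputs below are illustrative
# (scores depend on the model, and the call downloads the checkpoint), while top_k and
# targets map onto the postprocess parameters handled above.
from transformers import pipeline as _pipeline
_unmasker = _pipeline('fill-mask', model='distilroberta-base')
print(_unmasker('Paris is the <mask> of France.', top_k=2))
# each prediction is a dict with 'score', 'token', 'token_str' and 'sequence'
print(_unmasker('Paris is the <mask> of France.', targets=[' capital']))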
| 677 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
_lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Any = XGLMTokenizer
_UpperCamelCase : Optional[int] = XGLMTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : str = True
def __a ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Tuple = XGLMTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ):
_lowercase : Optional[int] = '<pad>'
_lowercase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(_lowerCAmelCase ) , 1_0_0_8 )
def __a ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def __a ( self ):
_lowercase : Union[str, Any] = XGLMTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
_lowercase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowercase : str = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowercase : List[Any] = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ):
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def __a ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_lowerCAmelCase , f.name )
_lowercase : Optional[Any] = XGLMTokenizer(f.name , keep_accents=_lowerCAmelCase )
_lowercase : Dict = pickle.dumps(_lowerCAmelCase )
pickle.loads(_lowerCAmelCase )
def __a ( self ):
if not self.test_rust_tokenizer:
return
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : Dict = 'I was born in 92000, and this is falsé.'
_lowercase : int = tokenizer.tokenize(_lowerCAmelCase )
_lowercase : int = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
_lowercase : Optional[int] = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = self.get_rust_tokenizer()
_lowercase : Dict = tokenizer.encode(_lowerCAmelCase )
_lowercase : Any = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __a ( self ):
_lowercase : str = 'Hello World!'
_lowercase : List[str] = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(_lowerCAmelCase , self.big_tokenizer.encode(_lowerCAmelCase ) )
@slow
def __a ( self ):
_lowercase : List[Any] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
_lowercase : Any = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(_lowerCAmelCase , self.big_tokenizer.encode(_lowerCAmelCase ) )
@slow
def __a ( self ):
# fmt: off
_lowercase : List[Any] = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='facebook/xglm-564M' , padding=_lowerCAmelCase , )
| 677 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = True
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
_lowercase : Optional[int] = super().outputs
if self.task == "default":
_lowercase : List[str] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
_lowercase : int = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowercase : str = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_lowercase : Any = 1
return inputs
| 677 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[int] = "donut-swin"
_UpperCamelCase : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=2_2_4 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=9_6 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 1_2, 2_4] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
_lowercase : List[Any] = image_size
_lowercase : Optional[int] = patch_size
_lowercase : Dict = num_channels
_lowercase : Any = embed_dim
_lowercase : Tuple = depths
_lowercase : Any = len(_lowerCAmelCase )
_lowercase : Dict = num_heads
_lowercase : Optional[int] = window_size
_lowercase : Optional[Any] = mlp_ratio
_lowercase : int = qkv_bias
_lowercase : Dict = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : int = drop_path_rate
_lowercase : int = hidden_act
_lowercase : Any = use_absolute_embeddings
_lowercase : Union[str, Any] = layer_norm_eps
_lowercase : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowercase : str = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
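# Hedged worked note on the derived hidden size above: with the default embed_dim = 96 and
# the four stages implied by depths = [2, 2, 6, 2], the channel dimension after the last
# stage is 96 * 2 ** (4 - 1) = 768.
assert 96 * 2 ** (4 - 1) == 768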
| 677 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
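# Hedged illustrative check of the uniqueness test above: set() deduplicates hashable
# elements, so the lengths match only when no element repeats.
assert len(set([1, 2, 3])) == len([1, 2, 3])  # all distinct
assert len(set([1, 2, 2])) != len([1, 2, 2])  # duplicate detected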
| 677 | 1 |
from statistics import mean, stdev
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 3 ) -> list:
_lowercase : str = min(SCREAMING_SNAKE_CASE )
_lowercase : Any = max(SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , SCREAMING_SNAKE_CASE ) for x in data]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 3 ) -> list:
_lowercase : int = mean(SCREAMING_SNAKE_CASE )
_lowercase : Any = stdev(SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , SCREAMING_SNAKE_CASE ) for x in data]
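# Hedged worked example of the two rescalings above, reusing the mean/stdev imported at the
# top of this file: min-max normalization maps [2, 4, 6] onto [0, 1], while z-score
# standardization (sample stdev = 2) centres it at zero mean and unit spread.
_demo_data = [2.0, 4.0, 6.0]
assert [round((x - min(_demo_data)) / (max(_demo_data) - min(_demo_data)), 3) for x in _demo_data] == [0.0, 0.5, 1.0]
assert [round((x - mean(_demo_data)) / stdev(_demo_data), 3) for x in _demo_data] == [-1.0, 0.0, 1.0]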
| 677 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list:
_lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Dict = i
_lowercase : str = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowercase : Optional[Any] = array[temp_index - 1]
temp_index -= 1
_lowercase : Optional[Any] = temp_index_value
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap
_lowercase : List[str] = index
_lowercase : List[str] = 2 * index + 1 # Left Node
_lowercase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowercase : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowercase : str = right_index
if largest != index:
_lowercase , _lowercase : List[str] = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_lowercase , _lowercase : List[Any] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return array
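# median-of-three pivot selection: return the median of the first, middle and last elements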
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
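# Hoare-style partition around the chosen pivot: returns the split index separating the two halves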
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Optional[Any] = low
_lowercase : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_lowercase , _lowercase : Tuple = array[j], array[i]
i += 1
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
_lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) )
_lowercase : str = 16
return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE )
max_depth -= 1
_lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = p
return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
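# Hedged worked example of the recursion budget used above: for 1_000 items the quicksort
# phase gets 2 * ceil(log2(1_000)) = 2 * 10 = 20 partition levels before the heap-sort
# fallback, and slices of at most 16 elements are finished with insertion sort.
assert 2 * math.ceil(math.log2(1_000)) == 20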
| 677 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCamelCase = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCamelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
if "://" in dataset_path:
_lowercase : Optional[Any] = dataset_path.split('://' )[1]
return dataset_path
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : Dict = not is_remote_filesystem(SCREAMING_SNAKE_CASE )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(SCREAMING_SNAKE_CASE ) , fs._strip_protocol(SCREAMING_SNAKE_CASE ) )
else:
fs.mv(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , recursive=SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> None:
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_lowercase : List[Any] = None
_lowercase : Union[str, Any] = None
_lowercase : List[Any] = threading.Lock()
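# Hedged worked example of the path handling above: a URI-style dataset path loses its
# scheme prefix, while a plain local path has no '://' and passes through unchanged.
assert 's3://my-bucket/my-dataset'.split('://')[1] == 'my-bucket/my-dataset'
assert '://' not in '/tmp/my-dataset'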
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class lowerCAmelCase_ ( datasets.BuilderConfig ):
_UpperCamelCase : Optional[datasets.Features] = None
class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : int = PandasConfig
def __a ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , _lowerCAmelCase ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowercase : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
_lowercase : Dict = data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : Optional[int] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowercase : str = []
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : Optional[Any] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'files': files} ) )
return splits
def __a ( self , _lowerCAmelCase ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowercase : List[str] = table_cast(_lowerCAmelCase , self.config.features.arrow_schema )
return pa_table
def __a ( self , _lowerCAmelCase ):
for i, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
with open(_lowerCAmelCase , 'rb' ) as f:
_lowercase : Optional[int] = pa.Table.from_pandas(pd.read_pickle(_lowerCAmelCase ) )
yield i, self._cast_table(_lowerCAmelCase )
| 677 |
from collections.abc import Sequence
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
_lowercase : Optional[Any] = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = result * x + coeff
return result
if __name__ == "__main__":
UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
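# Hedged worked check of the two evaluators above: for 5x^2 + 9.3x^3 + 7x^4 at x = 10 the
# direct power sum and Horner's nested form agree at 79_800.0 (up to float rounding), while
# Horner needs no explicit powers of x.
from functools import reduce
_coeffs, _x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
_direct = sum(c * _x ** i for i, c in enumerate(_coeffs))
_horner_value = reduce(lambda acc, c: acc * _x + c, reversed(_coeffs), 0.0)
assert abs(_direct - 79_800.0) < 1e-6 and abs(_horner_value - _direct) < 1e-6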
| 677 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : Tuple = tempfile.mkdtemp()
_lowercase : Any = BlipImageProcessor()
_lowercase : List[Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
_lowercase : List[str] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
_lowercase : Dict = InstructBlipProcessor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __a ( self , **_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).tokenizer
def __a ( self , **_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).image_processor
def __a ( self , **_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).qformer_tokenizer
def __a ( self ):
shutil.rmtree(self.tmpdirname )
def __a ( self ):
_lowercase : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_lowercase : str = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self ):
_lowercase : List[Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_lowercase : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowercase : Tuple = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
_lowercase : List[Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , _lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.get_image_processor()
_lowercase : Dict = self.get_tokenizer()
_lowercase : Optional[Any] = self.get_qformer_tokenizer()
_lowercase : Dict = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Union[str, Any] = image_processor(_lowerCAmelCase , return_tensors='np' )
_lowercase : Any = processor(images=_lowerCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __a ( self ):
_lowercase : Optional[int] = self.get_image_processor()
_lowercase : str = self.get_tokenizer()
_lowercase : Optional[Any] = self.get_qformer_tokenizer()
_lowercase : Optional[int] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
_lowercase : Optional[int] = 'lower newer'
_lowercase : str = processor(text=_lowerCAmelCase )
_lowercase : Optional[int] = tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
_lowercase : Optional[Any] = qformer_tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def __a ( self ):
_lowercase : List[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Dict = self.get_qformer_tokenizer()
_lowercase : Optional[Any] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
_lowercase : Optional[int] = 'lower newer'
_lowercase : Any = self.prepare_image_inputs()
_lowercase : int = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def __a ( self ):
_lowercase : Any = self.get_image_processor()
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : Dict = self.get_qformer_tokenizer()
_lowercase : List[str] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
_lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : List[str] = processor.batch_decode(_lowerCAmelCase )
_lowercase : List[str] = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase : Tuple = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : str = self.get_qformer_tokenizer()
_lowercase : List[Any] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
_lowercase : Optional[Any] = 'lower newer'
_lowercase : int = self.prepare_image_inputs()
_lowercase : Union[str, Any] = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 677 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=None ):
_lowercase : int = data
_lowercase : Union[str, Any] = None
def __repr__( self ):
_lowercase : Dict = []
_lowercase : Tuple = self
while temp:
string_rep.append(F"""{temp.data}""" )
_lowercase : Optional[Any] = temp.next
return "->".join(_lowerCAmelCase )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
if not elements_list:
raise Exception('The Elements List is empty' )
_lowercase : Union[str, Any] = Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
_lowercase : Optional[int] = Node(elements_list[i] )
_lowercase : List[Any] = current.next
return head
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def __magic_name__ ( ) -> List[str]:
from doctest import testmod
testmod()
_lowercase : int = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(SCREAMING_SNAKE_CASE )
print('Elements in Reverse:' )
print_reverse(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 1 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[list[int]]:
_lowercase : list[list[int]] = []
create_all_state(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , [] , SCREAMING_SNAKE_CASE )
return result
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(SCREAMING_SNAKE_CASE , total_number - level + 2 ):
current_list.append(SCREAMING_SNAKE_CASE )
create_all_state(i + 1 , SCREAMING_SNAKE_CASE , level - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
current_list.pop()
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
for i in total_list:
print(*SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = 4
UpperCamelCase = 2
UpperCamelCase = generate_all_combinations(n, k)
print_all_state(total_list)
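# Hedged cross-check of the backtracking enumeration above: for n = 4, k = 2 the expected
# output matches itertools.combinations over 1..4, i.e. the six pairs listed below.
from itertools import combinations as _combinations
assert list(_combinations(range(1, 5), 2)) == [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]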
| 677 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def __magic_name__ ( ) -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
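# Hedged worked example for both implementations above: the distance between [1, 2, 3] and
# [4, 5, 6] is sqrt(3^2 + 3^2 + 3^2) = sqrt(27) ~ 5.196.
assert abs(float(np.sqrt(np.sum((np.asarray([1, 2, 3]) - np.asarray([4, 5, 6])) ** 2))) - 27 ** 0.5) < 1e-12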
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[tuple[int, int]]:
_lowercase : Optional[Any] = 0
_lowercase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) # No of vertices in graph
_lowercase : str = [0] * n
_lowercase : str = [False] * n
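# depth-first search assigning discovery ids and low-link values (Tarjan's bridge criterion)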
def dfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Any = True
_lowercase : str = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , id_ )
_lowercase : Optional[int] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
_lowercase : Dict = min(low[at] , low[to] )
_lowercase : list[tuple[int, int]] = []
for i in range(SCREAMING_SNAKE_CASE ):
if not visited[i]:
dfs(SCREAMING_SNAKE_CASE , -1 , SCREAMING_SNAKE_CASE , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
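# Hedged cross-check of the bridge definition above (assumes networkx is installed): in a
# simple path 0-1-2 both edges are bridges, while a 3-cycle has none.
import networkx as _nx
assert sorted(tuple(sorted(e)) for e in _nx.bridges(_nx.path_graph(3))) == [(0, 1), (1, 2)]
assert list(_nx.bridges(_nx.cycle_graph(3))) == []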
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
_lowercase : List[Any] = word.split()
def justify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
_lowercase : List[Any] = max_width - width
_lowercase : int = len(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 1:
# if there is only one word in the line,
# just pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
_lowercase : Any = words_count - 1
# num_spaces_between_words_list[i] is the number of spaces
# to insert after the word line[i]
_lowercase : List[Any] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_lowercase : List[str] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(SCREAMING_SNAKE_CASE ):
num_spaces_between_words_list[i] += 1
_lowercase : int = []
for i in range(SCREAMING_SNAKE_CASE ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(SCREAMING_SNAKE_CASE )
_lowercase : Any = []
_lowercase : list[str] = []
_lowercase : Optional[int] = 0
for word in words:
if width + len(SCREAMING_SNAKE_CASE ) + len(SCREAMING_SNAKE_CASE ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(SCREAMING_SNAKE_CASE )
width += len(SCREAMING_SNAKE_CASE )
else:
# justify the line and add it to result
answer.append(justify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# reset new line and new width
_lowercase , _lowercase : str = [word], len(SCREAMING_SNAKE_CASE )
_lowercase : Dict = max_width - width - len(SCREAMING_SNAKE_CASE )
answer.append(' '.join(SCREAMING_SNAKE_CASE ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
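    # Illustrative example (not part of the original module): the justification helper
    # above, named __magic_name__ in this file, wraps and pads every line to max_width.
    print(__magic_name__('This is an example of text justification.', 16))
    # expected: ['This    is    an', 'example  of text', 'justification.  ']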
| 677 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCamelCase = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ElectraTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars
):
_lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) )
_lowercase : Dict = do_lower_case
_lowercase : Optional[Any] = strip_accents
_lowercase : Any = tokenize_chinese_chars
_lowercase : Tuple = normalizer_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = do_lower_case
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
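        # Token type ids are 0 for "[CLS] sequence A [SEP]" and 1 for "sequence B [SEP]".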
_lowercase : str = [self.sep_token_id]
_lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
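if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); loading the checkpoint
    # named below requires network access to the Hugging Face Hub.
    _tokenizer = lowerCAmelCase_.from_pretrained('google/electra-small-discriminator')
    print(_tokenizer('ELECTRA trains a discriminator over replaced tokens.')['input_ids'])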
| 677 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0.0 , _lowerCAmelCase = None , _lowerCAmelCase = "geglu" , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = "layer_norm" , _lowerCAmelCase = False , ):
super().__init__()
_lowercase : List[str] = only_cross_attention
_lowercase : Any = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
_lowercase : str = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_lowercase : List[Any] = AdaLayerNorm(_lowerCAmelCase , _lowerCAmelCase )
elif self.use_ada_layer_norm_zero:
_lowercase : str = AdaLayerNormZero(_lowerCAmelCase , _lowerCAmelCase )
else:
_lowercase : str = nn.LayerNorm(_lowerCAmelCase , elementwise_affine=_lowerCAmelCase )
_lowercase : str = Attention(
query_dim=_lowerCAmelCase , heads=_lowerCAmelCase , dim_head=_lowerCAmelCase , dropout=_lowerCAmelCase , bias=_lowerCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_lowerCAmelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_lowercase : Any = (
AdaLayerNorm(_lowerCAmelCase , _lowerCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(_lowerCAmelCase , elementwise_affine=_lowerCAmelCase )
)
_lowercase : int = Attention(
query_dim=_lowerCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_lowerCAmelCase , dim_head=_lowerCAmelCase , dropout=_lowerCAmelCase , bias=_lowerCAmelCase , upcast_attention=_lowerCAmelCase , ) # is self-attn if encoder_hidden_states is none
else:
_lowercase : List[str] = None
_lowercase : str = None
# 3. Feed-forward
_lowercase : List[str] = nn.LayerNorm(_lowerCAmelCase , elementwise_affine=_lowerCAmelCase )
_lowercase : int = FeedForward(_lowerCAmelCase , dropout=_lowerCAmelCase , activation_fn=_lowerCAmelCase , final_dropout=_lowerCAmelCase )
# let chunk size default to None
_lowercase : Optional[int] = None
_lowercase : List[Any] = 0
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
# Sets chunk feed-forward
_lowercase : str = chunk_size
_lowercase : Optional[Any] = dim
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
_lowercase : Dict = self.norma(_lowerCAmelCase , _lowerCAmelCase )
elif self.use_ada_layer_norm_zero:
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Any = self.norma(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hidden_dtype=hidden_states.dtype )
else:
_lowercase : Optional[Any] = self.norma(_lowerCAmelCase )
_lowercase : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_lowercase : Dict = self.attna(
_lowerCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
if self.use_ada_layer_norm_zero:
_lowercase : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output
_lowercase : Dict = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_lowercase : List[Any] = (
self.norma(_lowerCAmelCase , _lowerCAmelCase ) if self.use_ada_layer_norm else self.norma(_lowerCAmelCase )
)
_lowercase : List[Any] = self.attna(
_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Tuple = attn_output + hidden_states
# 3. Feed-forward
_lowercase : Tuple = self.norma(_lowerCAmelCase )
if self.use_ada_layer_norm_zero:
_lowercase : int = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
_lowercase : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_lowercase : Optional[Any] = torch.cat(
[self.ff(_lowerCAmelCase ) for hid_slice in norm_hidden_states.chunk(_lowerCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_lowercase : Union[str, Any] = self.ff(_lowerCAmelCase )
if self.use_ada_layer_norm_zero:
_lowercase : str = gate_mlp.unsqueeze(1 ) * ff_output
_lowercase : int = ff_output + hidden_states
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = 4 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = "geglu" , _lowerCAmelCase = False , ):
super().__init__()
_lowercase : Dict = int(dim * mult )
_lowercase : Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_lowercase : Dict = GELU(_lowerCAmelCase , _lowerCAmelCase )
if activation_fn == "gelu-approximate":
_lowercase : str = GELU(_lowerCAmelCase , _lowerCAmelCase , approximate='tanh' )
elif activation_fn == "geglu":
_lowercase : str = GEGLU(_lowerCAmelCase , _lowerCAmelCase )
elif activation_fn == "geglu-approximate":
_lowercase : Optional[Any] = ApproximateGELU(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = nn.ModuleList([] )
# project in
self.net.append(_lowerCAmelCase )
# project dropout
self.net.append(nn.Dropout(_lowerCAmelCase ) )
# project out
self.net.append(nn.Linear(_lowerCAmelCase , _lowerCAmelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_lowerCAmelCase ) )
def __a ( self , _lowerCAmelCase ):
for module in self.net:
_lowercase : Dict = module(_lowerCAmelCase )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = "none" ):
super().__init__()
_lowercase : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = approximate
def __a ( self , _lowerCAmelCase ):
if gate.device.type != "mps":
return F.gelu(_lowerCAmelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __a ( self , _lowerCAmelCase ):
_lowercase : List[str] = self.proj(_lowerCAmelCase )
_lowercase : List[Any] = self.gelu(_lowerCAmelCase )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__()
_lowercase : Optional[Any] = nn.Linear(_lowerCAmelCase , dim_out * 2 )
def __a ( self , _lowerCAmelCase ):
if gate.device.type != "mps":
return F.gelu(_lowerCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __a ( self , _lowerCAmelCase ):
_lowercase , _lowercase : Optional[int] = self.proj(_lowerCAmelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_lowerCAmelCase )
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__()
_lowercase : List[Any] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : str = self.proj(_lowerCAmelCase )
return x * torch.sigmoid(1.7_02 * x )
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__()
_lowercase : Dict = nn.Embedding(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = nn.SiLU()
_lowercase : Union[str, Any] = nn.Linear(_lowerCAmelCase , embedding_dim * 2 )
_lowercase : Optional[int] = nn.LayerNorm(_lowerCAmelCase , elementwise_affine=_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.linear(self.silu(self.emb(_lowerCAmelCase ) ) )
_lowercase , _lowercase : Optional[Any] = torch.chunk(_lowerCAmelCase , 2 )
_lowercase : Any = self.norm(_lowerCAmelCase ) * (1 + scale) + shift
return x
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__()
_lowercase : Tuple = CombinedTimestepLabelEmbeddings(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = nn.SiLU()
_lowercase : List[Any] = nn.Linear(_lowerCAmelCase , 6 * embedding_dim , bias=_lowerCAmelCase )
_lowercase : int = nn.LayerNorm(_lowerCAmelCase , elementwise_affine=_lowerCAmelCase , eps=1E-6 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowercase : Tuple = self.linear(self.silu(self.emb(_lowerCAmelCase , _lowerCAmelCase , hidden_dtype=_lowerCAmelCase ) ) )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Tuple = emb.chunk(6 , dim=1 )
_lowercase : Optional[int] = self.norm(_lowerCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = 1E-5 ):
super().__init__()
_lowercase : int = num_groups
_lowercase : List[str] = eps
if act_fn is None:
_lowercase : Tuple = None
else:
_lowercase : List[Any] = get_activation(_lowerCAmelCase )
_lowercase : str = nn.Linear(_lowerCAmelCase , out_dim * 2 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if self.act:
_lowercase : List[Any] = self.act(_lowerCAmelCase )
_lowercase : str = self.linear(_lowerCAmelCase )
_lowercase : int = emb[:, :, None, None]
_lowercase , _lowercase : Dict = emb.chunk(2 , dim=1 )
_lowercase : Optional[int] = F.group_norm(_lowerCAmelCase , self.num_groups , eps=self.eps )
_lowercase : List[str] = x * (1 + scale) + shift
return x
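if __name__ == "__main__":
    # Standalone sketch (not in the original file) of the GEGLU gating used by the
    # feed-forward block above: the linear projection is split in two halves and one
    # half gates the other through GELU.
    _sample = torch.randn(2, 4, 8)
    _proj = nn.Linear(8, 2 * 16)
    _hidden, _gate = _proj(_sample).chunk(2, dim=-1)
    print((_hidden * F.gelu(_gate)).shape)  # torch.Size([2, 4, 16])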
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
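# Only the configuration and the slow tokenizer are always importable; each backend-specific
# block below is added to the import structure only when its optional dependency is available.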
UpperCamelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import os
import sys
import unittest
UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
UpperCamelCase = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
UpperCamelCase = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[str] = get_test_to_tester_mapping(_lowerCAmelCase )
_lowercase : List[Any] = get_test_to_tester_mapping(_lowerCAmelCase )
_lowercase : Any = {'BertModelTest': 'BertModelTester'}
_lowercase : Union[str, Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = get_model_to_test_mapping(_lowerCAmelCase )
_lowercase : List[Any] = get_model_to_test_mapping(_lowerCAmelCase )
_lowercase : List[str] = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
_lowercase : Optional[int] = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
_lowercase : str = get_model_to_tester_mapping(_lowerCAmelCase )
_lowercase : Tuple = get_model_to_tester_mapping(_lowerCAmelCase )
_lowercase : List[str] = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
_lowercase : Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
| 677 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
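# Maps fairseq parameter-name fragments to the matching Hugging Face module paths;
# "*" is a placeholder for the encoder layer index, filled in while weights are copied below.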
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
for attribute in key.split('.' ):
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
_lowercase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowercase : List[str] = value
elif weight_type == "weight_g":
_lowercase : Any = value
elif weight_type == "weight_v":
_lowercase : Tuple = value
elif weight_type == "bias":
_lowercase : List[str] = value
else:
_lowercase : Dict = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Optional[int] = []
_lowercase : Optional[int] = fairseq_model.state_dict()
_lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowercase : Dict = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
_lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
_lowercase : Union[str, Any] = True
if "*" in mapped_key:
_lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowercase : Optional[int] = 'weight_g'
elif "weight_v" in name:
_lowercase : Optional[Any] = 'weight_v'
elif "weight" in name:
_lowercase : str = 'weight'
elif "bias" in name:
_lowercase : Any = 'bias'
else:
_lowercase : str = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Any = full_name.split('conv_layers.' )[-1]
_lowercase : Any = name.split('.' )
_lowercase : Optional[Any] = int(items[0] )
_lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowercase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowercase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowercase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowercase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
if config_path is not None:
_lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
_lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowercase : Dict = target_dict.pad_index
_lowercase : Dict = target_dict.bos_index
_lowercase : Tuple = target_dict.eos_index
_lowercase : List[Any] = len(target_dict.symbols )
_lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
_lowercase : int = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
_lowercase : str = True if config.feat_extract_norm == 'layer' else False
_lowercase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
_lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
_lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowercase : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
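# Example invocation (illustrative paths only; the fairseq checkpoint must exist locally):
#   python <this_conversion_script>.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned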
| 677 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = "linear"
_UpperCamelCase : Tuple = "cosine"
_UpperCamelCase : List[Any] = "cosine_with_restarts"
_UpperCamelCase : Dict = "polynomial"
_UpperCamelCase : Optional[Any] = "constant"
_UpperCamelCase : Optional[int] = "constant_with_warmup"
_UpperCamelCase : List[str] = "piecewise_constant"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> str:
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[str]:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
_lowercase : Union[str, Any] = {}
_lowercase : Union[str, Any] = step_rules.split(',' )
for rule_str in rule_list[:-1]:
_lowercase , _lowercase : Any = rule_str.split(':' )
_lowercase : Any = int(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = float(SCREAMING_SNAKE_CASE )
_lowercase : Any = value
_lowercase : Optional[Any] = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
_lowercase : Any = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_lowercase : Any = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[int]:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
_lowercase : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> int:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
_lowercase : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1E-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> Dict:
_lowercase : List[Any] = optimizer.defaults['lr']
if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_lowercase : Optional[int] = lr_init - lr_end
_lowercase : List[str] = num_training_steps - num_warmup_steps
_lowercase : Any = 1 - (current_step - num_warmup_steps) / decay_steps
_lowercase : Tuple = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Any:
_lowercase : Union[str, Any] = SchedulerType(SCREAMING_SNAKE_CASE )
_lowercase : str = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
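if __name__ == "__main__":
    # Standalone sketch (not part of the original module) of the multiplier produced by the
    # linear warmup + linear decay schedule above, assuming 2 warmup steps and 10 training
    # steps in total.
    _warmup, _total = 2, 10
    for _step in range(_total + 1):
        if _step < _warmup:
            _mult = _step / max(1, _warmup)
        else:
            _mult = max(0.0, (_total - _step) / max(1, _total - _warmup))
        print(_step, round(_mult, 3))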
| 677 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal: swap coordinates where needed so that x1 >= x0 and y1 >= y0
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Dict = bbox[i, j, 0]
_lowercase : int = t
_lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase )
_lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = 10
def __a ( self ):
_lowercase : Optional[int] = TFLayoutLMModelTester(self )
_lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __a ( self ):
pass
def __magic_name__ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def __a ( self ):
# initialize model with randomly initialized sequence classification head
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : List[Any] = outputs.loss
_lowercase : Any = (2,)
self.assertEqual(loss.shape , _lowerCAmelCase )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Dict = (2, 2)
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Dict = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
        # initialize model with randomly initialized question answering head
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Any = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
| 677 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Tuple = "roformer"
def __init__( self , _lowerCAmelCase=5_0_0_0_0 , _lowerCAmelCase=None , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase=3_0_7_2 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1_5_3_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0 , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Dict = vocab_size
_lowercase : int = hidden_size if embedding_size is None else embedding_size
_lowercase : Tuple = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Any = num_attention_heads
_lowercase : List[Any] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : int = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : int = initializer_range
_lowercase : Dict = layer_norm_eps
_lowercase : Optional[Any] = rotary_value
_lowercase : Tuple = use_cache
class lowerCAmelCase_ ( __snake_case ):
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
_lowercase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 677 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[str] = logging.get_logger()
# the current default level is logging.WARNING
_lowercase : Union[str, Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = logging.get_verbosity()
_lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : Tuple = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(_lowerCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase )
_lowercase : Optional[Any] = logging.log_levels[env_level_str]
_lowercase : Dict = logging.get_verbosity()
self.assertEqual(
_lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_lowercase : Any = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_lowercase : Tuple = logging.logging.getLogger()
with CaptureLogger(_lowerCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __a ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowercase : List[str] = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_lowerCAmelCase ) as cl:
logger.warning_advice(_lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def __magic_name__ ( ) -> List[str]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
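# Optional backends below (tokenizers, torch, TensorFlow, tensorflow-text, flax) only extend the import structure when the corresponding package is installed.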
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase = "pt"
elif is_tf_available():
UpperCamelCase = "tf"
else:
UpperCamelCase = "jax"
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = PerceiverTokenizer
_UpperCamelCase : str = False
def __a ( self ):
super().setUp()
_lowercase : List[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def __a ( self , **_lowerCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowercase : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
try:
_lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) )
_lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) )
if max_length is not None and len(_lowerCAmelCase ) > max_length:
_lowercase : Any = toks[:max_length]
if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0:
while len(_lowerCAmelCase ) < min_length:
_lowercase : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Optional[Any] = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
if " " not in output_txt and len(_lowerCAmelCase ) > 1:
_lowercase : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase )
)
if with_prefix_space:
_lowercase : List[Any] = ' ' + output_txt
_lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
return output_txt, output_ids
def __a ( self ):
_lowercase : Dict = self.perceiver_tokenizer
_lowercase : Optional[Any] = 'Unicode €.'
_lowercase : str = tokenizer(_lowerCAmelCase )
_lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
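        # expected ids: the Perceiver tokenizer works on raw UTF-8 bytes, shifting each byte id by the 6 special tokens and wrapping the sequence with [CLS] (4) and [SEP] (5)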
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' )
_lowercase : Union[str, Any] = tokenizer('e è é ê ë' )
_lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , _lowerCAmelCase )
# decoding
_lowercase : int = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def __a ( self ):
_lowercase : List[str] = self.perceiver_tokenizer
_lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
_lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
if FRAMEWORK != "jax":
_lowercase : int = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def __a ( self ):
_lowercase : List[Any] = self.perceiver_tokenizer
_lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _lowerCAmelCase )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertNotIn('decoder_input_ids' , _lowerCAmelCase )
self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.perceiver_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : Optional[int] = tokenizer(
text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def __a ( self ):
        # sanity check on the default model_max_length so we are sure the rest of the test is meaningful
_lowercase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
_lowercase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[str] = tempfile.mkdtemp()
_lowercase : int = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
_lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
_lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : List[str] = json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(_lowerCAmelCase )
_lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )]
_lowercase : str = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[int] = tokenizer_class.from_pretrained(
_lowerCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __a ( self ):
_lowercase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
pass
def __a ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
_lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
| 677 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
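# Bidirectional Dijkstra: two best-first searches run in lockstep (forward from the source, backward from the destination);
# the relaxation helper below advances one frontier and checks the other for a cheaper meeting point.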
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_lowercase : Optional[Any] = cst_fwd.get(SCREAMING_SNAKE_CASE , np.inf )
_lowercase : Optional[Any] = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_lowercase : Union[str, Any] = new_cost_f
_lowercase : Dict = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_lowercase : int = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : List[str] = -1
_lowercase : Any = set()
_lowercase : Dict = set()
_lowercase : Optional[Any] = {source: 0}
_lowercase : Optional[Any] = {destination: 0}
_lowercase : int = {source: None}
_lowercase : List[Any] = {destination: None}
_lowercase : PriorityQueue[Any] = PriorityQueue()
_lowercase : PriorityQueue[Any] = PriorityQueue()
_lowercase : Optional[Any] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_lowercase , _lowercase : List[Any] = queue_forward.get()
visited_forward.add(SCREAMING_SNAKE_CASE )
_lowercase , _lowercase : str = queue_backward.get()
visited_backward.add(SCREAMING_SNAKE_CASE )
_lowercase : Dict = pass_and_relaxation(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
_lowercase : str = pass_and_relaxation(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_lowercase : Tuple = shortest_distance
return shortest_path_distance
UpperCamelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
UpperCamelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
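# The second mapping is the reverse (backward) adjacency list, used by the search that starts from the destination.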
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=1_0 , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1_0 , _lowerCAmelCase=0.02 , _lowerCAmelCase="divided_space_time" , _lowerCAmelCase=None , ):
_lowercase : Optional[int] = parent
_lowercase : str = batch_size
_lowercase : Optional[Any] = image_size
_lowercase : Dict = num_channels
_lowercase : Tuple = patch_size
_lowercase : Any = num_frames
_lowercase : str = is_training
_lowercase : Optional[int] = use_labels
_lowercase : Optional[Any] = hidden_size
_lowercase : List[str] = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : List[Any] = attention_type
_lowercase : str = initializer_range
_lowercase : Union[str, Any] = scope
_lowercase : int = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches_per_frame + 1 (for the CLS token)
_lowercase : List[str] = (image_size // patch_size) ** 2
_lowercase : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def __a ( self ):
_lowercase : Tuple = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowercase : Optional[Any] = None
if self.use_labels:
_lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowercase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __a ( self ):
_lowercase : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
_lowercase : Dict = self.num_labels
return config
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TimesformerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Tuple = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TimesformerForVideoClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Optional[int] = model(_lowerCAmelCase )
# verify the logits shape
_lowercase : int = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : int = config_and_inputs
_lowercase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
_UpperCamelCase : Optional[Any] = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : List[str] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : List[Any] = False
def __a ( self ):
_lowercase : int = TimesformerModelTester(self )
_lowercase : List[str] = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : Union[str, Any] = copy.deepcopy(_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def __a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def __a ( self ):
pass
def __a ( self ):
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def __a ( self ):
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[int] = model_class(_lowerCAmelCase )
_lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Tuple = [*signature.parameters.keys()]
_lowercase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Union[str, Any] = TimesformerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __a ( self ):
if not self.has_attentions:
pass
else:
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : str = True
for model_class in self.all_model_classes:
_lowercase : int = self.model_tester.seq_length
_lowercase : Optional[Any] = self.model_tester.num_frames
_lowercase : Dict = True
_lowercase : List[str] = False
_lowercase : List[str] = True
_lowercase : Any = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase : List[Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowercase : Union[str, Any] = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase : str = True
_lowercase : Union[str, Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase : Dict = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowercase : Dict = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_lowercase : str = len(_lowerCAmelCase )
# Check attention is always last and order is fine
_lowercase : Optional[Any] = True
_lowercase : Union[str, Any] = True
_lowercase : Union[str, Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase : List[Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) )
_lowercase : Dict = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __a ( self ):
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowercase : Any = outputs.hidden_states
_lowercase : int = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
_lowercase : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[str] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Union[str, Any] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __magic_name__ ( ) -> Any:
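    # Download a short sample clip from the hub and return it as a list of per-frame numpy arrays.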
_lowercase : Dict = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_lowercase : Union[str, Any] = np.load(SCREAMING_SNAKE_CASE )
return list(SCREAMING_SNAKE_CASE )
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __a ( self ):
_lowercase : List[Any] = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
_lowerCAmelCase )
_lowercase : Optional[int] = self.default_image_processor
_lowercase : Dict = prepare_video()
_lowercase : Optional[Any] = image_processor(video[:8] , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : Union[str, Any] = model(**_lowerCAmelCase )
# verify the logits
_lowercase : Tuple = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowercase : List[str] = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 677 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Tuple = "ClapFeatureExtractor"
_UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
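        # Tokenize the text and/or extract audio input features; when both modalities are given they are returned together in a single encoding.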
if text is not None:
_lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowercase : Any = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowercase : Union[str, Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
_lowercase : Dict = self.tokenizer.model_input_names
_lowercase : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 677 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
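# Shared expectation for the reader tests below: a 4-row, 3-column dataset whose column dtypes match the requested features.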
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : int = tmp_path / 'cache'
_lowercase : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Union[str, Any] = features.copy() if features else default_expected_features
_lowercase : Optional[int] = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Tuple = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Optional[int] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_lowercase : Any = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_lowercase : Optional[Any] = [parquet_path]
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Optional[int] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ) -> List[str]:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for split in splits:
_lowercase : Tuple = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Union[str, Any] = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Any = tmp_path / 'cache'
_lowercase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Dict = features.copy() if features else default_expected_features
_lowercase : str = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Dict = ParquetDatasetReader({'train': parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if split:
_lowercase : Optional[int] = {split: parquet_path}
else:
_lowercase : Dict = 'train'
_lowercase : Any = {'train': parquet_path, 'test': parquet_path}
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : List[Any] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
_lowercase : List[str] = pq.ParquetFile(tmp_path / 'foo.parquet' )
_lowercase : Union[str, Any] = pf.read()
assert dataset.data.table == output_table
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : str = str(shared_datadir / 'test_image_rgb.jpg' )
_lowercase : Union[str, Any] = {'image': [image_path]}
_lowercase : Any = Features({'image': Image()} )
_lowercase : Union[str, Any] = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
_lowercase : int = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
_lowercase : int = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
_lowercase : Union[str, Any] = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=SCREAMING_SNAKE_CASE_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
| 700 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
_lowercase : Any = num_of_nodes
_lowercase : list[list[int]] = []
_lowercase : dict[int, int] = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
self.m_edges.append([u_node, v_node, weight] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __a ( self , _lowerCAmelCase ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowercase : Optional[int] = self.find_component(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if component_size[u_node] <= component_size[v_node]:
_lowercase : str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
_lowercase : Any = self.find_component(_lowerCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = []
_lowercase : Optional[Any] = 0
_lowercase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowercase : str = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowercase , _lowercase , _lowercase : List[str] = edge
_lowercase : Union[str, Any] = self.m_component[u]
_lowercase : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowercase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase , _lowercase , _lowercase : int = edge
_lowercase : Optional[int] = self.m_component[u]
_lowercase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
_lowercase : str = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __magic_name__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( _snake_case , unittest.TestCase ):
_UpperCamelCase : int = ShapEImgaImgPipeline
_UpperCamelCase : List[Any] = ["image"]
_UpperCamelCase : Dict = ["image"]
_UpperCamelCase : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : Tuple = False
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 8
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_lowercase : Any = CLIPVisionModel(lowerCAmelCase__ )
return model
@property
def __a ( self ):
_lowercase : Optional[int] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_resize=lowerCAmelCase__ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
return image_processor
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : int = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowercase : Tuple = PriorTransformer(**lowerCAmelCase__ )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Dict = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
_lowercase : str = ShapERenderer(**lowerCAmelCase__ )
return model
def __a ( self ):
_lowercase : Optional[int] = self.dummy_prior
_lowercase : Any = self.dummy_image_encoder
_lowercase : Any = self.dummy_image_processor
_lowercase : Any = self.dummy_renderer
_lowercase : Optional[int] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
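        # Heun scheduler in 'sample' prediction mode with Karras sigmas and sample clipping for the dummy pipeline components.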
_lowercase : int = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
_lowercase : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith('mps' ):
_lowercase : List[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
_lowercase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_lowercase : str = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def __a ( self ):
_lowercase : List[Any] = 'cpu'
_lowercase : Dict = self.get_dummy_components()
_lowercase : List[Any] = self.pipeline_class(**lowerCAmelCase__ )
_lowercase : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowercase : Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
_lowercase : Optional[int] = output.images[0]
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
_lowercase : Dict = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self ):
_lowercase : List[Any] = torch_device == 'cpu'
_lowercase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , )
def __a ( self ):
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**lowerCAmelCase__ )
_lowercase : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowercase : Dict = 1
_lowercase : int = 2
_lowercase : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
for key in inputs.keys():
if key in self.batch_params:
_lowercase : str = batch_size * [inputs[key]]
_lowercase : int = pipe(**lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
_lowercase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
_lowercase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
_lowercase : Dict = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowercase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_lowercase : List[str] = pipe(
lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 701 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Tuple = {}
_lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids']
_lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 677 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
# A local function to see if a dot lands in the circle.
def is_in_circle(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
_lowercase : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_lowercase : List[str] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_SCREAMING_SNAKE_CASE ) )
    # The circle's area (pi * 1^2) over the square's area (2 * 2) is pi/4, so multiplying the hit proportion by 4 estimates pi.
_lowercase : List[str] = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for _ in range(_SCREAMING_SNAKE_CASE ) ) * (max_value - min_value)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 1.0 ) -> None:
def identity_function(SCREAMING_SNAKE_CASE ) -> float:
return x
_lowercase : Optional[int] = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowercase : str = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print('******************' )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
def function_to_integrate(SCREAMING_SNAKE_CASE ) -> float:
return sqrt(4.0 - x * x )
_lowercase : str = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration}
UpperCamelCase = {"facebook/bart-base": BartTokenizer}
def __magic_name__ ( ) -> str:
_lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' )
_lowercase : Optional[Any] = parser.parse_args()
return args
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]:
_lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
_lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
_lowercase : Dict = 0
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
model.eval()
_lowercase : List[Any] = None
_lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) )
with torch.no_grad():
_lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.'
_lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_lowercase : str = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
SCREAMING_SNAKE_CASE , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=SCREAMING_SNAKE_CASE , )
logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) )
logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) )
_lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = ort_sess.run(
SCREAMING_SNAKE_CASE , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(SCREAMING_SNAKE_CASE ),
'max_length': np.array(SCREAMING_SNAKE_CASE ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def __magic_name__ ( ) -> Any:
_lowercase : Dict = parse_args()
_lowercase : Union[str, Any] = 5
_lowercase : Union[str, Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowercase : Optional[Any] = torch.device(args.device )
_lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(SCREAMING_SNAKE_CASE )
if args.max_length:
_lowercase : Any = args.max_length
if args.num_beams:
_lowercase : List[str] = args.num_beams
if args.output_file_path:
_lowercase : Union[str, Any] = args.output_file_path
else:
_lowercase : Tuple = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 0 |
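# The script above exports a scripted beam-search module to ONNX and then checks ONNX
# Runtime outputs against PyTorch with np.testing.assert_allclose. A sketch of the same
# export-then-validate loop on a tiny linear layer; the file name and shapes below are
# assumptions for illustration only.
import numpy as np
import torch
import onnxruntime

tiny = torch.nn.Linear(4, 2).eval()
example = torch.randn(1, 4)
torch.onnx.export(
    tiny,
    (example,),
    "tiny_linear.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
)
sess = onnxruntime.InferenceSession("tiny_linear.onnx")
(ort_y,) = sess.run(None, {"x": example.numpy()})
np.testing.assert_allclose(tiny(example).detach().numpy(), ort_y, rtol=1e-3, atol=1e-5)
print("PyTorch and ONNX Runtime outputs match")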
from manim import *
class lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
def __a ( self ):
_lowercase : Any = Rectangle(height=0.5 , width=0.5 )
_lowercase : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowercase : List[Any] = [mem.copy() for i in range(6 )]
_lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowercase : Dict = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : str = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : int = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
_lowercase : Optional[Any] = Text('CPU' , font_size=2_4 )
_lowercase : Optional[int] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
_lowercase : Tuple = [mem.copy() for i in range(4 )]
_lowercase : Optional[int] = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : Union[str, Any] = Text('GPU' , font_size=2_4 )
_lowercase : List[Any] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
_lowercase : List[Any] = [mem.copy() for i in range(6 )]
_lowercase : Tuple = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : Any = Text('Model' , font_size=2_4 )
_lowercase : int = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
_lowercase : Union[str, Any] = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowercase : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
cpu_targs.append(A_ )
_lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowercase : Optional[int] = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : List[Any] = Text('Loaded Checkpoint' , font_size=2_4 )
_lowercase : Tuple = Group(A_ , A_ ).arrange(A_ , aligned_edge=A_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowercase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase : int = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
_lowercase : Any = MarkupText(
F"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowercase : Optional[int] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) , Write(A_ ) )
self.play(Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
_lowercase : str = []
_lowercase : Tuple = []
for i, rect in enumerate(A_ ):
_lowercase : int = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
first_animations.append(GrowFromCenter(A_ , run_time=1 ) )
_lowercase : Optional[int] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(*A_ )
self.wait()
| 703 |
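# The scene above arranges rows of small rectangles as "memory" blocks, labels them, and
# animates checkpoint shards moving onto CPU slots. A stripped-down scene sketch using the
# same Manim primitives; the class and label names below are my own, not the original's.
from manim import DOWN, RIGHT, Create, Rectangle, Scene, Text, VGroup, Write

class TinyMemoryRow(Scene):
    def construct(self):
        # one row of four "memory" blocks, plus a text label underneath
        blocks = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(4)]).arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24).next_to(blocks, DOWN)
        self.play(Create(blocks), Write(label))
        self.wait()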
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def __a ( self ):
_lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowercase : List[str] = model(_lowerCAmelCase )[0]
_lowercase : str = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : List[Any] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
| 677 | 0 |
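# Most checks in the test class above follow one recipe: build random integer ids, run the
# model, and assert the logits shape. A sketch of the same recipe on a toy Keras stack so it
# runs without downloading pretrained weights; all sizes below are arbitrary assumptions.
import tensorflow as tf

batch_size, seq_length, vocab_size, hidden_size = 2, 7, 99, 32
input_ids = tf.random.uniform((batch_size, seq_length), maxval=vocab_size, dtype=tf.int32)
toy_model = tf.keras.Sequential(
    [tf.keras.layers.Embedding(vocab_size, hidden_size), tf.keras.layers.Dense(vocab_size)]
)
logits = toy_model(input_ids)
assert logits.shape == (batch_size, seq_length, vocab_size)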
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=9 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase=8 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0_02 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0 , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
_lowercase : int = parent
_lowercase : Optional[int] = batch_size
_lowercase : Dict = encoder_seq_length
_lowercase : Union[str, Any] = decoder_seq_length
# For common tests
_lowercase : int = self.decoder_seq_length
_lowercase : Tuple = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[str] = use_labels
_lowercase : int = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : int = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Union[str, Any] = d_ff
_lowercase : Tuple = relative_attention_num_buckets
_lowercase : List[Any] = dropout_rate
_lowercase : Dict = initializer_factor
_lowercase : Optional[Any] = eos_token_id
_lowercase : str = pad_token_id
_lowercase : int = decoder_start_token_id
_lowercase : Dict = None
_lowercase : List[str] = decoder_layers
def __a ( self ):
return TaConfig.from_pretrained('google/umt5-base' )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
if attention_mask is None:
_lowercase : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase : Dict = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase_ )
if decoder_head_mask is None:
_lowercase : int = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase_ )
if cross_attn_head_mask is None:
_lowercase : List[str] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __a ( self ):
_lowercase : List[str] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase : Tuple = input_ids.clamp(self.pad_token_id + 1 )
_lowercase : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase : Optional[Any] = self.get_config()
_lowercase : List[Any] = config.num_attention_heads
_lowercase : int = self.prepare_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, input_dict
def __a ( self ):
_lowercase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self ):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __a ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Dict = UMTaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : int = model(
input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , )
_lowercase : Any = model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
_lowercase : Tuple = result.last_hidden_state
_lowercase : Optional[int] = result.past_key_values
_lowercase : str = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : str = UMTaModel(config=UpperCamelCase_ ).get_decoder().to(UpperCamelCase_ ).eval()
# first forward pass
_lowercase : Optional[Any] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
_lowercase : Dict = model(UpperCamelCase_ )
_lowercase : Dict = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 )
_lowercase : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_lowercase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowercase : Optional[int] = model(UpperCamelCase_ )['last_hidden_state']
_lowercase : str = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ )['last_hidden_state']
# select random slice
_lowercase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowercase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Optional[int] = UMTaModel(config=UpperCamelCase_ ).to(UpperCamelCase_ ).half().eval()
_lowercase : Tuple = model(**UpperCamelCase_ )['last_hidden_state']
self.parent.assertFalse(torch.isnan(UpperCamelCase_ ).any().item() )
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_UpperCamelCase : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_UpperCamelCase : Any = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_UpperCamelCase : List[str] = True
_UpperCamelCase : Any = False
_UpperCamelCase : Any = False
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : Any = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_UpperCamelCase : Optional[Any] = [0.8, 0.9]
def __a ( self ):
_lowercase : List[str] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
_lowercase : Optional[int] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCamelCase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCamelCase_ , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase_ )
def __a ( self ):
_lowercase : List[str] = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
_lowercase : Dict = config_and_inputs[0]
_lowercase : Union[str, Any] = UMTaForConditionalGeneration(UpperCamelCase_ ).eval()
model.to(UpperCamelCase_ )
_lowercase : List[Any] = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase_ ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ ),
}
for attn_name, (name, mask) in zip(UpperCamelCase_ , head_masking.items() ):
_lowercase : Tuple = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowercase : Dict = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ )
_lowercase : Optional[Any] = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , **UpperCamelCase_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase : Dict = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __a ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __a ( self ):
_lowercase : int = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCamelCase_ ).to(UpperCamelCase_ )
_lowercase : int = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCamelCase_ , legacy=UpperCamelCase_ )
_lowercase : List[str] = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase : Dict = tokenizer(UpperCamelCase_ , return_tensors='pt' , padding=UpperCamelCase_ ).input_ids
# fmt: off
_lowercase : Optional[int] = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : Optional[int] = model.generate(input_ids.to(UpperCamelCase_ ) )
_lowercase : Tuple = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowercase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 704 |
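# One check above verifies that decoding with past_key_values (the cache) matches a full
# re-run on the newest position. The same invariant, sketched on a toy causal "model" whose
# whole state is a running sum; this is only an analogy for the cache test, not UMT5 itself.
import torch

def toy_causal_model(x: torch.Tensor) -> torch.Tensor:
    # output at position t depends only on inputs up to t
    return torch.cumsum(x.float(), dim=-1)

prefix = torch.randint(0, 10, (2, 6))
next_token = torch.randint(0, 10, (2, 1))
no_cache = toy_causal_model(torch.cat([prefix, next_token], dim=-1))[:, -1]
cached_state = toy_causal_model(prefix)[:, -1]            # the "past" summary of the prefix
with_cache = cached_state + next_token.squeeze(-1).float()
assert torch.allclose(no_cache, with_cache, atol=1e-3)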
import qiskit
def single_qubit_measure ( qubits , classical_bits ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 677 | 0 |
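# Note on expected behaviour of the circuit above (an observation, not part of the original
# row): with X gates applied to both qubits before measurement, the circuit collapses to the
# |11> state on essentially every shot, so for 1_000 shots the returned counts should look
# roughly like {'11': 1000}.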
import argparse
import os
import re
import packaging.version
UpperCamelCase = "examples/"
UpperCamelCase = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
UpperCamelCase = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
UpperCamelCase = "README.md"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowercase : Tuple = f.read()
_lowercase : Optional[Any] = REPLACE_PATTERNS[pattern]
_lowercase : Optional[Any] = replace.replace('VERSION' , _A )
_lowercase : Optional[int] = re_pattern.sub(_A , _A )
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_A )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern='examples' )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def __magic_name__ ( ) -> int:
_lowercase : List[Any] = "🤗 Transformers currently provides the following architectures"
_lowercase : Optional[int] = "1. Want to contribute a new model?"
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowercase : Any = f.readlines()
# Find the start of the list.
_lowercase : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowercase : Union[str, Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_lowercase : int = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_A )
def __magic_name__ ( ) -> List[Any]:
with open(REPLACE_FILES['init'] , 'r' ) as f:
_lowercase : List[str] = f.read()
_lowercase : List[Any] = REPLACE_PATTERNS["init"][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def __magic_name__ ( SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
_lowercase : Optional[int] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
_lowercase : List[str] = default_version.base_version
elif patch:
_lowercase : Any = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_lowercase : Any = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_lowercase : str = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
_lowercase : str = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
def __magic_name__ ( ) -> str:
_lowercase : Union[str, Any] = get_version()
_lowercase : Tuple = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_lowercase : List[str] = current_version.base_version
# Check with the user we got that right.
_lowercase : Tuple = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
_lowercase : int = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 705 |
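# The release utility above rewrites version strings in place with compiled regexes. A sketch
# of the core substitution step in isolation; the sample text below is my own, not a real file.
import re

re_init = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
sample = 'import os\n__version__ = "0.19.0.dev0"\n'
print(re_init.sub('__version__ = "0.19.0"', sample))  # -> __version__ = "0.19.0"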
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
_lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 | 0 |
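# The shift test above checks that decoder inputs are the labels shifted one position to the
# right, with the decoder start token prepended. A small NumPy re-implementation sketch of
# that idea (my own version for illustration, not the library's shift_tokens_right):
import numpy as np

def shift_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]          # move every token one slot to the right
    shifted[:, 0] = decoder_start_token_id      # prepend the decoder start token
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_right(ids, pad_token_id=1, decoder_start_token_id=2))
# -> [[ 2 71 82 18 33  2  1]]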
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCamelCase = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if args.student_type == "roberta":
_lowercase : int = False
elif args.student_type == "gpt2":
_lowercase : int = False
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if args.student_type == "roberta":
_lowercase : Tuple = False
def __magic_name__ ( ) -> Any:
_lowercase : List[Any] = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=lowerCamelCase_ , choices=['distilbert', 'roberta', 'gpt2'] , required=lowerCamelCase_ , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=lowerCamelCase_ , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The teacher model.' )
    parser.add_argument('--temperature' , default=2.0 , type=lowerCamelCase_ , help='Temperature for the softmax.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=lowerCamelCase_ , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=lowerCamelCase_ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=lowerCamelCase_ , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=lowerCamelCase_ , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=lowerCamelCase_ , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=lowerCamelCase_ , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=lowerCamelCase_ , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=lowerCamelCase_ , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=lowerCamelCase_ , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
        '--mlm_smoothing' , default=0.7 , type=lowerCamelCase_ , help='Smoothing parameter to emphasize rarer tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=lowerCamelCase_ , help='The token counts in the data_file for MLM.' )
parser.add_argument(
        '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only on the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=lowerCamelCase_ , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=lowerCamelCase_ , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=lowerCamelCase_ , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=lowerCamelCase_ , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=lowerCamelCase_ , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5E-4 , type=lowerCamelCase_ , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1E-6 , type=lowerCamelCase_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=lowerCamelCase_ , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=lowerCamelCase_ , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCamelCase_ , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=lowerCamelCase_ , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=lowerCamelCase_ , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=lowerCamelCase_ , default=500 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=lowerCamelCase_ , default=4_000 , help='Checkpoint interval.' )
_lowercase : Tuple = parser.parse_args()
sanity_checks(lowerCamelCase_ )
# ARGS #
init_gpu_params(lowerCamelCase_ )
set_seed(lowerCamelCase_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
' itUse `--force` if you want to overwrite it' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(lowerCamelCase_ ) , lowerCamelCase_ , indent=4 )
git_log(args.dump_path )
_lowercase : Optional[int] = MODEL_CLASSES[args.student_type]
_lowercase : List[str] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowercase : Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowercase : Dict = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowercase : List[Any] = tokenizer.all_special_tokens.index(lowerCamelCase_ )
_lowercase : Dict = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
_lowercase : int = special_tok_ids
_lowercase : Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , 'rb' ) as fp:
_lowercase : Optional[int] = pickle.load(lowerCamelCase_ )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , 'rb' ) as fp:
_lowercase : List[Any] = pickle.load(lowerCamelCase_ )
_lowercase : List[Any] = np.maximum(lowerCamelCase_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowercase : str = 0.0 # do not predict special tokens
_lowercase : Tuple = torch.from_numpy(lowerCamelCase_ )
else:
_lowercase : Optional[Any] = None
_lowercase : Union[str, Any] = LmSeqsDataset(params=lowerCamelCase_ , data=lowerCamelCase_ )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
_lowercase : int = student_config_class.from_pretrained(args.student_config )
_lowercase : Optional[Any] = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
_lowercase : Dict = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCamelCase_ )
else:
_lowercase : str = student_model_class(lowerCamelCase_ )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('Student loaded.' )
# TEACHER #
_lowercase : Union[str, Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCamelCase_ )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCamelCase_ , lowerCamelCase_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCamelCase_ , lowerCamelCase_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowercase : Dict = Distiller(
params=lowerCamelCase_ , dataset=lowerCamelCase_ , token_probs=lowerCamelCase_ , student=lowerCamelCase_ , teacher=lowerCamelCase_ )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 706 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = True
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
_lowercase : Optional[int] = super().outputs
if self.task == "default":
_lowercase : List[str] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
_lowercase : int = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowercase : str = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_lowercase : Any = 1
return inputs
| 677 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase_ ( unittest.TestCase , UpperCAmelCase_ ):
def __a ( self ):
_lowercase : List[str] = load_tool('text-classification' )
self.tool.setup()
_lowercase : Dict = load_tool('text-classification' , remote=_lowercase )
def __a ( self ):
_lowercase : Tuple = self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(_lowercase , 'positive' )
def __a ( self ):
_lowercase : Optional[Any] = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(_lowercase , 'positive' )
def __a ( self ):
_lowercase : Tuple = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(_lowercase , 'positive' )
def __a ( self ):
_lowercase : Any = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(_lowercase , 'positive' )
| 707 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
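# Editor's note (descriptive comment, not from the original file): the helper above reports
# whether every element of the input collection is distinct by comparing the size of its set
# to the length of the collection, e.g. [1, 2, 3] -> True while [1, 2, 2] -> False.
# This assumes the elements are hashable.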
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str | Literal[False]:
_lowercase : str = list(__A )
_lowercase : Optional[Any] = list(__A )
_lowercase : str = 0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count += 1
_lowercase : Union[str, Any] = '''_'''
if count > 1:
return False
else:
return "".join(__A )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[str]:
_lowercase : int = []
while True:
_lowercase : Optional[int] = ['''$'''] * len(__A )
_lowercase : int = []
for i in range(len(__A ) ):
for j in range(i + 1 , len(__A ) ):
_lowercase : str = compare_string(binary[i] , binary[j] )
if k is False:
_lowercase : List[str] = '''*'''
_lowercase : Any = '''*'''
temp.append('X' )
for i in range(len(__A ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__A ) == 0:
return pi
_lowercase : Union[str, Any] = list(set(__A ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[str]:
_lowercase : str = []
for minterm in minterms:
_lowercase : Dict = ''''''
for _ in range(__A ):
_lowercase : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(__A )
return temp
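# Worked example for the converter above (editor's comment, describing the intended output):
# with 3 variables the minterm 5 becomes the bit string '101' and the minterm 1 becomes '001',
# i.e. every decimal minterm is rendered as a fixed-width binary string, most significant bit first.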
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
_lowercase : Dict = list(__A )
_lowercase : Tuple = list(__A )
_lowercase : int = 0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[str]:
_lowercase : Optional[Any] = []
_lowercase : List[str] = [0] * len(__A )
for i in range(len(chart[0] ) ):
_lowercase : int = 0
_lowercase : List[Any] = -1
for j in range(len(__A ) ):
if chart[j][i] == 1:
count += 1
_lowercase : List[Any] = j
if count == 1:
_lowercase : int = 1
for i in range(len(__A ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__A ) ):
_lowercase : Any = 0
temp.append(prime_implicants[i] )
while True:
_lowercase : Any = 0
_lowercase : Any = -1
_lowercase : List[str] = 0
for i in range(len(__A ) ):
_lowercase : List[Any] = chart[i].count(1 )
if count_n > max_n:
_lowercase : Dict = count_n
_lowercase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__A ) ):
_lowercase : int = 0
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[list[int]]:
_lowercase : List[Any] = [[0 for x in range(len(__A ) )] for x in range(len(__A ) )]
for i in range(len(__A ) ):
_lowercase : Union[str, Any] = prime_implicants[i].count('_' )
for j in range(len(__A ) ):
if is_for_table(prime_implicants[i] , binary[j] , __A ):
_lowercase : Union[str, Any] = 1
return chart
def __magic_name__ ( ) -> None:
_lowercase : Union[str, Any] = int(input('Enter the no. of variables\n' ) )
_lowercase : Dict = [
float(__A )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowercase : int = decimal_to_binary(__A , __A )
_lowercase : Tuple = check(__A )
print('Prime Implicants are:' )
print(__A )
_lowercase : Optional[Any] = prime_implicant_chart(__A , __A )
_lowercase : Tuple = selection(__A , __A )
print('Essential Prime Implicants are:' )
print(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
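# Editor's summary of the intended pipeline above (descriptive only): read the number of
# variables and the decimal minterms, convert them to binary strings, repeatedly merge terms
# that differ in a single bit to obtain the prime implicants, build the prime-implicant chart,
# and finally select the essential prime implicants from that chart.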
| 708 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list:
_lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Dict = i
_lowercase : str = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowercase : Optional[Any] = array[temp_index - 1]
temp_index -= 1
_lowercase : Optional[Any] = temp_index_value
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap
_lowercase : List[str] = index
_lowercase : List[str] = 2 * index + 1 # Left Node
_lowercase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowercase : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowercase : str = right_index
if largest != index:
_lowercase , _lowercase : List[str] = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_lowercase , _lowercase : List[Any] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Optional[Any] = low
_lowercase : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_lowercase , _lowercase : Tuple = array[j], array[i]
i += 1
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
_lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) )
_lowercase : str = 16
return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
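# Editor's note on the driver above (descriptive only): introsort partitions with a
# median-of-three pivot, falls back to heap sort once the recursion-depth budget of roughly
# 2 * log2(n) is exhausted, and hands the remaining slices of at most 16 elements to
# insertion sort.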
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE )
max_depth -= 1
_lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = p
return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 677 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
UpperCamelCase = 256_047
UpperCamelCase = 256_145
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
_UpperCamelCase : int = NllbTokenizer
_UpperCamelCase : Tuple = NllbTokenizerFast
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[Any] = True
_UpperCamelCase : List[str] = {}
def __a ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Union[str, Any] = NllbTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ):
_lowercase : List[str] = NllbTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
_lowercase : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowercase : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowercase : List[str] = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __a ( self ):
_lowercase : Any = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Any = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[Any] = tempfile.mkdtemp()
_lowercase : List[str] = tokenizer_r.save_pretrained(_lowerCAmelCase )
_lowercase : List[str] = tokenizer_p.save_pretrained(_lowerCAmelCase )
                # Check that it saves the same files, plus the tokenizer.json file for the fast tokenizer
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowercase : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase )
# Checks everything loads correctly in the same way
_lowercase : Optional[int] = tokenizer_r.from_pretrained(_lowerCAmelCase )
_lowercase : List[str] = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
_lowercase : str = tempfile.mkdtemp()
_lowercase : Tuple = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase )
_lowercase : Tuple = tokenizer_p.save_pretrained(_lowerCAmelCase )
                # Check that it saves the same files
self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase )
# Checks everything loads correctly in the same way
_lowercase : List[Any] = tokenizer_r.from_pretrained(_lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
_lowercase : Any = tempfile.mkdtemp()
_lowercase : Any = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase )
_lowercase : List[str] = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowercase : Union[str, Any] = tokenizer_r.from_pretrained(_lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
@require_torch
def __a ( self ):
if not self.test_seqaseq:
return
_lowercase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
_lowercase : Optional[int] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
_lowercase : Dict = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
_lowercase : Tuple = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCAmelCase , tgt_texts=_lowerCAmelCase , max_length=3 , max_target_length=1_0 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 1_0 )
# max_target_length will default to max_length if not specified
_lowercase : List[str] = tokenizer.prepare_seqaseq_batch(
_lowerCAmelCase , tgt_texts=_lowerCAmelCase , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
_lowercase : str = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCAmelCase , max_length=3 , max_target_length=1_0 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , _lowerCAmelCase )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __a ( self ):
pass
def __a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : Union[str, Any] = [AddedToken('<special>' , lstrip=_lowerCAmelCase )]
_lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : List[str] = tokenizer_r.encode('Hey this is a <special> token' )
_lowercase : int = tokenizer_r.encode('<special>' , add_special_tokens=_lowerCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Tuple = self.tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer_p.encode('Hey this is a <special> token' )
_lowercase : Dict = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = "facebook/nllb-200-distilled-600M"
_UpperCamelCase : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
_UpperCamelCase : Optional[int] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_UpperCamelCase : List[Any] = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
def __a ( cls ):
_lowercase : Tuple = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
_lowercase : Any = 1
return cls
def __a ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 2_5_6_0_5_7 )
def __a ( self ):
_lowercase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
def __a ( self ):
self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
_lowercase : Optional[Any] = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
_lowercase : Tuple = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
_lowercase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0] , _lowerCAmelCase )
_lowercase : int = 1_0
_lowercase : List[Any] = self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_6_2_0_3, 3] )
def __a ( self ):
_lowercase : Optional[Any] = tempfile.mkdtemp()
_lowercase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCAmelCase )
_lowercase : Union[str, Any] = NllbTokenizer.from_pretrained(_lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase )
@require_torch
def __a ( self ):
_lowercase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowercase : int = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 1_5) , batch.input_ids.shape )
self.assertEqual((2, 1_5) , batch.attention_mask.shape )
_lowercase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __a ( self ):
_lowercase : str = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors='pt' )
_lowercase : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_0 , return_tensors='pt' )
_lowercase : Dict = targets['input_ids']
_lowercase : str = shift_tokens_right(
_lowerCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __a ( self ):
_lowercase : List[Any] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
# A, test, EOS, en_XX
'input_ids': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 2_5_6_0_5_7,
} , )
@require_torch
def __a ( self ):
_lowercase : int = True
_lowercase : Optional[int] = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
_lowercase : Any = False
_lowercase : str = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
| 709 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
with open(lowerCamelCase__ ) as metadata_file:
_lowercase : List[Any] = json.load(lowerCamelCase__ )
_lowercase : Dict = LukeConfig(use_entity_aware_attention=lowerCamelCase__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
_lowercase : Optional[Any] = torch.load(lowerCamelCase__ , map_location='cpu' )["module"]
# Load the entity vocab file
_lowercase : Optional[int] = load_original_entity_vocab(lowerCamelCase__ )
# add an entry for [MASK2]
_lowercase : Dict = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_lowercase : List[str] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
_lowercase : List[str] = AddedToken('<ent>' , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )
_lowercase : Union[str, Any] = AddedToken('<ent2>' , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , 'tokenizer_config.json' ) , 'r' ) as f:
_lowercase : Dict = json.load(lowerCamelCase__ )
_lowercase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowerCamelCase__ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
_lowercase : Tuple = MLukeTokenizer.from_pretrained(lowerCamelCase__ )
# Initialize the embeddings of the special tokens
_lowercase : Any = tokenizer.convert_tokens_to_ids(['@'] )[0]
_lowercase : str = tokenizer.convert_tokens_to_ids(['#'] )[0]
_lowercase : Tuple = state_dict["embeddings.word_embeddings.weight"]
_lowercase : List[str] = word_emb[ent_init_index].unsqueeze(0 )
_lowercase : int = word_emb[enta_init_index].unsqueeze(0 )
_lowercase : Any = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_lowercase : Union[str, Any] = state_dict[bias_name]
_lowercase : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
_lowercase : str = decoder_bias[enta_init_index].unsqueeze(0 )
_lowercase : Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowercase : int = F"""encoder.layer.{layer_index}.attention.self."""
_lowercase : Tuple = state_dict[prefix + matrix_name]
_lowercase : int = state_dict[prefix + matrix_name]
_lowercase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowercase : Tuple = state_dict["entity_embeddings.entity_embeddings.weight"]
_lowercase : Any = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_lowercase : List[str] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_lowercase : Union[str, Any] = state_dict["entity_predictions.bias"]
_lowercase : Union[str, Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_lowercase : str = torch.cat([entity_prediction_bias, entity_mask_bias] )
_lowercase : Optional[Any] = LukeForMaskedLM(config=lowerCamelCase__ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
_lowercase : Any = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
_lowercase : Dict = state_dict[key]
else:
_lowercase : Dict = state_dict[key]
_lowercase : Optional[Any] = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
if set(lowerCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowerCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_lowercase : List[Any] = MLukeTokenizer.from_pretrained(lowerCamelCase__ , task='entity_classification' )
_lowercase : Optional[int] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_lowercase : Dict = (0, 9)
_lowercase : List[str] = tokenizer(lowerCamelCase__ , entity_spans=[span] , return_tensors='pt' )
_lowercase : int = model(**lowerCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowercase : int = torch.Size((1, 33, 768) )
_lowercase : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowercase : Tuple = torch.Size((1, 1, 768) )
_lowercase : Tuple = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_lowercase : Optional[int] = MLukeTokenizer.from_pretrained(lowerCamelCase__ )
_lowercase : int = "Tokyo is the capital of <mask>."
_lowercase : List[str] = (24, 30)
_lowercase : Any = tokenizer(lowerCamelCase__ , entity_spans=[span] , return_tensors='pt' )
_lowercase : Tuple = model(**lowerCamelCase__ )
_lowercase : List[Any] = encoding["input_ids"][0].tolist()
_lowercase : Union[str, Any] = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
_lowercase : Optional[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCamelCase__ )
_lowercase : Dict = outputs.entity_logits[0][0].argmax().item()
_lowercase : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCamelCase__ ) )
model.save_pretrained(lowerCamelCase__ )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : int = ["[MASK]", "[PAD]", "[UNK]"]
_lowercase : Dict = [json.loads(lowerCamelCase__ ) for line in open(lowerCamelCase__ )]
_lowercase : str = {}
for entry in data:
_lowercase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_lowercase : int = entity_id
break
_lowercase : Union[str, Any] = F"""{language}:{entity_name}"""
_lowercase : List[str] = entity_id
return new_mapping
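# Editor's note (descriptive comment): the loader above keeps the special tokens [MASK], [PAD]
# and [UNK] under their bare names and keys every other entity as '<language>:<name>',
# e.g. 'en:Japan', which is the format the sanity check later in this script relies on.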
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 710 |
from collections.abc import Sequence
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
_lowercase : Optional[Any] = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = result * x + coeff
return result
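# Editor's sketch (comment only): Horner's rule rewrites c0 + c1*x + ... + cn*x**n as
# (...((cn*x + c[n-1])*x + ...)*x + c0), one multiply-add per coefficient. For the demo values
# below, poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0, both functions evaluate to 79800.0.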
if __name__ == "__main__":
UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 677 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=0 , ):
_lowercase : Dict = parent
_lowercase : str = batch_size
_lowercase : str = seq_length
_lowercase : List[str] = is_training
_lowercase : Optional[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : List[Any] = use_labels
_lowercase : Any = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : str = hidden_act
_lowercase : str = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : str = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : List[str] = type_sequence_label_size
_lowercase : Optional[int] = initializer_range
_lowercase : Any = num_labels
_lowercase : int = num_choices
_lowercase : List[Any] = scope
_lowercase : Optional[int] = projection_dim
def __a ( self ):
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Dict = None
if self.use_token_type_ids:
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Tuple = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
_lowercase : Union[str, Any] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFDPRContextEncoder(config=__A )
_lowercase : Any = model(__A , attention_mask=__A , token_type_ids=__A )
_lowercase : Dict = model(__A , token_type_ids=__A )
_lowercase : Any = model(__A )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFDPRQuestionEncoder(config=__A )
_lowercase : Optional[Any] = model(__A , attention_mask=__A , token_type_ids=__A )
_lowercase : Tuple = model(__A , token_type_ids=__A )
_lowercase : List[Any] = model(__A )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFDPRReader(config=__A )
_lowercase : int = model(__A , attention_mask=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __a ( self ):
_lowercase : Tuple = self.prepare_config_and_inputs()
(
_lowercase
) : Optional[int] = config_and_inputs
_lowercase : Optional[Any] = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Dict = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
_UpperCamelCase : List[str] = False
def __a ( self ):
_lowercase : Dict = TFDPRModelTester(self )
_lowercase : Any = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__A )
def __a ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__A )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__A )
@slow
def __a ( self ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = TFDPRContextEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[int] = TFDPRContextEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = TFDPRQuestionEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Tuple = TFDPRReader.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : int = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
_lowercase : Optional[int] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
_lowercase : Optional[Any] = model(__A )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_lowercase : Tuple = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 711 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=None ):
_lowercase : int = data
_lowercase : Union[str, Any] = None
def __repr__( self ):
_lowercase : Dict = []
_lowercase : Tuple = self
while temp:
string_rep.append(F"""{temp.data}""" )
_lowercase : Optional[Any] = temp.next
return "->".join(_lowerCAmelCase )
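# Builds a singly linked list from elements_list (raising if it is empty) and returns the head node.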
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
if not elements_list:
raise Exception('The Elements List is empty' )
_lowercase : Union[str, Any] = Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
_lowercase : Optional[int] = Node(elements_list[i] )
_lowercase : List[Any] = current.next
return head
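# Recursively prints the list elements in reverse order, deepest node first.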
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def __magic_name__ ( ) -> List[str]:
from doctest import testmod
testmod()
_lowercase : int = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(SCREAMING_SNAKE_CASE )
print('Elements in Reverse:' )
print_reverse(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 677 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=3_0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1_0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=None , ):
_lowercase : Tuple = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Tuple = image_size
_lowercase : Optional[Any] = patch_size
_lowercase : str = num_channels
_lowercase : Union[str, Any] = is_training
_lowercase : int = use_labels
_lowercase : str = hidden_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Dict = hidden_act
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Dict = type_sequence_label_size
_lowercase : int = initializer_range
_lowercase : Tuple = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Dict = (image_size // patch_size) ** 2
_lowercase : Dict = num_patches + 1
def __a ( self ):
_lowercase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Union[str, Any] = None
if self.use_labels:
_lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : str = self.get_config()
return config, pixel_values, labels
def __a ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = TFViTModel(config=_a )
_lowercase : Optional[int] = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_lowercase : Tuple = self.image_size // 2
_lowercase : List[Any] = pixel_values[:, :, :image_size, :image_size]
_lowercase : Tuple = model(_a , interpolate_pos_encoding=_a , training=_a )
_lowercase : List[Any] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = self.type_sequence_label_size
_lowercase : List[str] = TFViTForImageClassification(_a )
_lowercase : Union[str, Any] = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_lowercase : Union[str, Any] = self.image_size // 2
_lowercase : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
_lowercase : Union[str, Any] = model(_a , interpolate_pos_encoding=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowercase : Optional[Any] = 1
_lowercase : Tuple = TFViTForImageClassification(_a )
_lowercase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self ):
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase : Any = config_and_inputs
_lowercase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_UpperCamelCase : List[str] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_UpperCamelCase : Dict = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : Optional[Any] = False
def __a ( self ):
_lowercase : int = TFViTModelTester(self )
_lowercase : Tuple = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __a ( self ):
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __a ( self ):
pass
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowercase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Dict = model_class(_a )
_lowercase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Tuple = [*signature.parameters.keys()]
_lowercase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __a ( self ):
_lowercase : Union[str, Any] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_a )
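# Helper that loads the COCO fixture image used by the vision integration test below.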
def __magic_name__ ( ) -> Dict:
_lowercase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def __a ( self ):
_lowercase : int = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
_lowercase : Optional[Any] = self.default_image_processor
_lowercase : int = prepare_img()
_lowercase : Optional[int] = image_processor(images=_a , return_tensors='tf' )
# forward pass
_lowercase : Tuple = model(**_a )
# verify the logits
_lowercase : Dict = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_lowercase : Dict = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _a , atol=1E-4 )
| 712 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007
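# Two equivalent Euclidean distance implementations: the first uses NumPy, the second is pure Python.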
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def __magic_name__ ( ) -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
| 677 | 0 |