def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy of a body: KE = 0.5 * m * v**2."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
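    # Quick sanity check (illustrative values, not from the original file):
    # KE = 0.5 * 10 * 10**2 = 500 J.
    assert kinetic_energy(10.0, 10.0) == 500.0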
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')

    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
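
# Illustrative examples, hand-checked against the rules above (not from the original file):
#   replace_key('prime_state_ln.weight') -> 'encoder.final_layer_norm.weight'
#   replace_key('foo.y_emb.lyrics')      -> 'foo.metadata_embedding.lyrics'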
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)'
    )
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)'
    )
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)'
    )
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match')

        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}'):
            r = requests.get(f'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=True)
            open(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}', 'wb').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}')['model']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                # The exact key rewrite for these conditioner weights was lost in this
                # dump; they are carried over under the original key here (assumption),
                # and fix_jukebox_keys renames them below.
                new_dic[k] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else f'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f'{pytorch_dump_folder_path}/mapping.json', 'w') as txtfile:
        json.dump(mapping, txtfile)

    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
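    # Example invocation (the script filename is illustrative):
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics \
    #       --pytorch_dump_folder_path jukebox-1b-lyrics-converted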
def counting_sort(collection):
    # if the collection is empty, return empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
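
# Worked example (illustrative): counting_sort([3, 1, 2, 1])
#   counts per value (1, 2, 3):  [2, 1, 1]
#   after prefix sums:           [2, 3, 4]
#   stable placement from the end -> [1, 1, 2, 3]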
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/table-transformer-detection': (
        'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
    ),
}
class TableTransformerConfig(PretrainedConfig):
    """Configuration class for a Table Transformer (DETR-style) model."""

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
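
# Usage sketch (checkpoint name taken from the archive map above):
#   config = TableTransformerConfig.from_pretrained("microsoft/table-transformer-detection")
#   assert config.hidden_size == config.d_model  # via attribute_map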
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
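
# Usage sketch (checkpoint name is illustrative): the auto class looks up the
# concrete implementation in FLAX_MODEL_MAPPING based on the config's model type.
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # -> FlaxBertModel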
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
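    # Example invocation (script filename and paths are illustrative):
    #   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./lxmert_ckpt --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin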
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    """Wraps a BLIP image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
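
# Usage sketch (checkpoint name is illustrative):
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")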
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'
    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f'Generating {path}')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-model-flax')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-model-flax-org')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('test-model-flax', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id='test-model-flax')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='test-model-flax', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('valid_org/test-model-flax-org', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-model-flax-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='valid_org/test-model-flax-org', push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)

        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)

        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='10KB')

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
def print_max_activities(start: list[int], finish: list[int]):
    n = len(finish)
    print('The following activities are selected:')

    # The first activity is always selected
    i = 0
    print(i, end=',')

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
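    # With the sample data above the greedy scan prints "0,1,3,4,":
    # each selected activity starts no earlier than the previous one finishes.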
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli', model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli', model_args.language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                'xnli', model_args.train_language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features['label'].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli', model_args.language, split='validation', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features['label'].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli', model_args.language, split='test', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features['label'].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task='xnli', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['premise'], examples['hypothesis'], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on train dataset',
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on validation dataset',
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on prediction dataset',
            )
# Get the metric function
    metric = evaluate.load('xnli')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Prediction
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='predict')

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f'{index}\t{item}\n')
if __name__ == "__main__":
    main()
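
# Example invocation (paths and model name are illustrative):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --output_dir /tmp/debug_xnli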
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert scale_features: map network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = 5
# Realm tok
__magic_name__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(A , exist_ok=A )
__magic_name__ = os.path.join(A , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(A , exist_ok=A )
def __A ( self ) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def __A ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = RealmConfig(num_block_records=self.num_block_records )
return config
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=A , )
return block_records
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.get_config()
__magic_name__ = self.get_dummy_retriever()
__magic_name__ = retriever.tokenizer
__magic_name__ = np.array([0, 3] , dtype='''long''' )
__magic_name__ = tokenizer(['''Test question'''] ).input_ids
__magic_name__ = tokenizer(
['''the fourth'''] , add_special_tokens=A , return_token_type_ids=A , return_attention_mask=A , ).input_ids
__magic_name__ = config.reader_seq_len
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = retriever(
A , A , answer_ids=A , max_length=A , return_tensors='''np''' )
self.assertEqual(len(A ) , 2 )
self.assertEqual(len(A ) , 2 )
self.assertEqual(len(A ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.get_config()
__magic_name__ = self.get_dummy_retriever()
__magic_name__ = retriever.tokenizer
__magic_name__ = np.array([0, 3, 5] , dtype='''long''' )
__magic_name__ = tokenizer(['''Test question'''] ).input_ids
__magic_name__ = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=A , return_token_type_ids=A , return_attention_mask=A , ).input_ids
__magic_name__ = config.reader_seq_len
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = retriever(
A , A , answer_ids=A , max_length=A , return_tensors='''np''' )
self.assertEqual([False, True, True] , A )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
__magic_name__ = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
__magic_name__ = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
__magic_name__ = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) | 678 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
a_ : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Union[str, Any] ):
if args.student_type == "roberta":
__magic_name__ = False
elif args.student_type == "gpt2":
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[str] ):
if args.student_type == "roberta":
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4000 , help='''Checkpoint interval.''' )
__magic_name__ = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__magic_name__ , __magic_name__ , __magic_name__ = MODEL_CLASSES[args.student_type]
__magic_name__ , __magic_name__ , __magic_name__ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__magic_name__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__magic_name__ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__magic_name__ = tokenizer.all_special_tokens.index(snake_case_ )
__magic_name__ = tokenizer.all_special_ids[idx]
logger.info(f'Special tokens {special_tok_ids}' )
__magic_name__ = special_tok_ids
__magic_name__ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
with open(args.data_file , '''rb''' ) as fp:
__magic_name__ = pickle.load(snake_case_ )
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , '''rb''' ) as fp:
__magic_name__ = pickle.load(snake_case_ )
__magic_name__ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__magic_name__ = 0.0 # do not predict special tokens
__magic_name__ = torch.from_numpy(snake_case_ )
else:
__magic_name__ = None
__magic_name__ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
__magic_name__ = student_config_class.from_pretrained(args.student_config )
__magic_name__ = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
__magic_name__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__magic_name__ = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info('''Student loaded.''' )
# TEACHER #
__magic_name__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__magic_name__ = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main() | 678 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = jnp.ones((batch_size, length) ) / length
return scores
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = None
__magic_name__ = 20
__magic_name__ = self._get_uniform_logits(batch_size=2 , length=A )
# tweak scores to not be uniform anymore
__magic_name__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__magic_name__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__magic_name__ = jax.nn.softmax(A , axis=-1 )
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
__magic_name__ = jax.nn.softmax(temp_dist_warper_sharper(A , scores.copy() , cur_len=A ) , axis=-1 )
__magic_name__ = jax.nn.softmax(temp_dist_warper_smoother(A , scores.copy() , cur_len=A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = None
__magic_name__ = 10
__magic_name__ = 2
# create ramp distribution
__magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy()
__magic_name__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = top_k_warp(A , A , cur_len=A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__magic_name__ = 5
__magic_name__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, length) ).copy()
__magic_name__ = top_k_warp_safety_check(A , A , cur_len=A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = None
__magic_name__ = 10
__magic_name__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__magic_name__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
__magic_name__ = np.exp(top_p_warp(A , A , cur_len=A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__magic_name__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__magic_name__ = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__magic_name__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__magic_name__ = top_p_warp(A , A , cur_len=A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
# check that min length is applied at length 5
__magic_name__ = ids_tensor((batch_size, 20) , vocab_size=20 )
__magic_name__ = 5
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = min_dist_processor(A , A , cur_len=A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = 15
__magic_name__ = min_dist_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
# check that all scores are -inf except the bos_token_id score
__magic_name__ = ids_tensor((batch_size, 1) , vocab_size=20 )
__magic_name__ = 1
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__magic_name__ = 3
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = 5
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
# check that all scores are -inf except the eos_token_id when max_length is reached
__magic_name__ = ids_tensor((batch_size, 4) , vocab_size=20 )
__magic_name__ = 4
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__magic_name__ = 3
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = 4
__magic_name__ = 10
__magic_name__ = 15
__magic_name__ = 2
__magic_name__ = 1
__magic_name__ = 15
# dummy input_ids and scores
__magic_name__ = ids_tensor((batch_size, sequence_length) , A )
__magic_name__ = input_ids.copy()
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = scores.copy()
# instantiate all dist processors
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
__magic_name__ = 10
# no processor list
__magic_name__ = temp_dist_warp(A , A , cur_len=A )
__magic_name__ = top_k_warp(A , A , cur_len=A )
__magic_name__ = top_p_warp(A , A , cur_len=A )
__magic_name__ = min_dist_proc(A , A , cur_len=A )
__magic_name__ = bos_dist_proc(A , A , cur_len=A )
__magic_name__ = eos_dist_proc(A , A , cur_len=A )
# with processor list
__magic_name__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__magic_name__ = processor(A , A , cur_len=A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = 4
__magic_name__ = 10
__magic_name__ = 15
__magic_name__ = 2
__magic_name__ = 1
__magic_name__ = 15
# dummy input_ids and scores
__magic_name__ = ids_tensor((batch_size, sequence_length) , A )
__magic_name__ = input_ids.copy()
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = scores.copy()
# instantiate all dist processors
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
__magic_name__ = 10
# no processor list
def run_no_processor_list(A , A , A ):
__magic_name__ = temp_dist_warp(A , A , cur_len=A )
__magic_name__ = top_k_warp(A , A , cur_len=A )
__magic_name__ = top_p_warp(A , A , cur_len=A )
__magic_name__ = min_dist_proc(A , A , cur_len=A )
__magic_name__ = bos_dist_proc(A , A , cur_len=A )
__magic_name__ = eos_dist_proc(A , A , cur_len=A )
return scores
# with processor list
def run_processor_list(A , A , A ):
__magic_name__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__magic_name__ = processor(A , A , cur_len=A )
return scores
__magic_name__ = jax.jit(A )
__magic_name__ = jax.jit(A )
__magic_name__ = jitted_run_no_processor_list(A , A , A )
__magic_name__ = jitted_run_processor_list(A , A , A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) | 678 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 1 |
import requests
a_ : List[str] = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
# fetching a list of articles in json format
__magic_name__ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(f'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>') | 678 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 1 |
import math
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : float = 1 / 1_2345 ):
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 3
while True:
__magic_name__ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(snake_case_ ):
__magic_name__ = int(snake_case_ )
total_partitions += 1
if check_partition_perfect(snake_case_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(snake_case_ )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""") | 678 |
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 1 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if not isinstance(snake_case_ , snake_case_ ):
__magic_name__ = f'Input value of [number={number}] must be an integer'
raise TypeError(snake_case_ )
if number < 1:
__magic_name__ = f'Input value of [number={number}] must be > 0'
raise ValueError(snake_case_ )
__magic_name__ = 1
for i in range(1 , snake_case_ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 |
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Union[str, Any] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 1 |
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : int ):
__magic_name__ = x
__magic_name__ = y
for step in range(snake_case_ ): # noqa: B007
__magic_name__ = a * a - b * b + x
__magic_name__ = 2 * a * b + y
__magic_name__ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _SCREAMING_SNAKE_CASE ( snake_case_ : float ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _SCREAMING_SNAKE_CASE ( snake_case_ : float ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(snake_case_ , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : int = 800 , snake_case_ : int = 600 , snake_case_ : float = -0.6 , snake_case_ : float = 0 , snake_case_ : float = 3.2 , snake_case_ : int = 50 , snake_case_ : bool = True , ):
__magic_name__ = Image.new('''RGB''' , (image_width, image_height) )
__magic_name__ = img.load()
# loop through the image-coordinates
for image_x in range(snake_case_ ):
for image_y in range(snake_case_ ):
# determine the figure-coordinates based on the image-coordinates
__magic_name__ = figure_width / image_width * image_height
__magic_name__ = figure_center_x + (image_x / image_width - 0.5) * figure_width
__magic_name__ = figure_center_y + (image_y / image_height - 0.5) * figure_height
__magic_name__ = get_distance(snake_case_ , snake_case_ , snake_case_ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__magic_name__ = get_color_coded_rgb(snake_case_ )
else:
__magic_name__ = get_black_and_white_rgb(snake_case_ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a_ : Any = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 678 |
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
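
# This script fine-tunes a BART-based TAPEX model with a sequence-classification head for
# table-based fact verification, defaulting to the TabFact dataset on the Hugging Face Hub.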
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
__magic_name__ = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
datasets.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__magic_name__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__magic_name__ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__magic_name__ = data_args.train_file.split('''.''' )[-1]
__magic_name__ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__magic_name__ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
__magic_name__ = load_dataset('''csv''' , data_files=snake_case_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__magic_name__ = load_dataset('''json''' , data_files=snake_case_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__magic_name__ = raw_datasets['''train'''].features['''label'''].names
__magic_name__ = len(snake_case_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__magic_name__ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=snake_case_ , )
__magic_name__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__magic_name__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__magic_name__ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__magic_name__ = {'''Refused''': 0, '''Entailed''': 1}
__magic_name__ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__magic_name__ = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(snake_case_ : int ):
# Tokenize the texts
def _convert_table_text_to_pandas(snake_case_ : int ):
__magic_name__ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
__magic_name__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__magic_name__ = examples['''statement''']
__magic_name__ = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
__magic_name__ = tokenizer(snake_case_ , snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ )
__magic_name__ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
__magic_name__ = raw_datasets.map(
snake_case_ , batched=snake_case_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
__magic_name__ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
__magic_name__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
__magic_name__ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
__magic_name__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
__magic_name__ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
__magic_name__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(snake_case_ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case_ : EvalPrediction ):
__magic_name__ = p.predictions[0] if isinstance(p.predictions , snake_case_ ) else p.predictions
__magic_name__ = np.argmax(snake_case_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__magic_name__ = default_data_collator
elif training_args.fpaa:
__magic_name__ = DataCollatorWithPadding(snake_case_ , pad_to_multiple_of=8 )
else:
__magic_name__ = None
# Initialize our Trainer
__magic_name__ = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=snake_case_ , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
__magic_name__ = None
if training_args.resume_from_checkpoint is not None:
__magic_name__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__magic_name__ = last_checkpoint
__magic_name__ = trainer.train(resume_from_checkpoint=snake_case_ )
__magic_name__ = train_result.metrics
__magic_name__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case_ )
)
__magic_name__ = min(snake_case_ , len(snake_case_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , snake_case_ )
trainer.save_metrics('''train''' , snake_case_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__magic_name__ = trainer.evaluate(eval_dataset=snake_case_ )
__magic_name__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case_ )
__magic_name__ = min(snake_case_ , len(snake_case_ ) )
trainer.log_metrics('''eval''' , snake_case_ )
trainer.save_metrics('''eval''' , snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__magic_name__ = predict_dataset.remove_columns('''label''' )
__magic_name__ = trainer.predict(snake_case_ , metric_key_prefix='''predict''' ).predictions
__magic_name__ = np.argmax(snake_case_ , axis=1 )
__magic_name__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(snake_case_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(snake_case_ ):
__magic_name__ = label_list[item]
writer.write(f'{index}\t{item}\n' )
__magic_name__ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 678 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = OpenAIGPTTokenizer
_a = OpenAIGPTTokenizerFast
_a = True
_a = False
def __A ( self ) -> str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__magic_name__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__magic_name__ = dict(zip(A , range(len(A ) ) ) )
__magic_name__ = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(A ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(A ) )
def __A ( self , A ) -> str:
'''simple docstring'''
return "lower newer", "lower newer"
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__magic_name__ = '''lower'''
__magic_name__ = ['''low''', '''er</w>''']
__magic_name__ = tokenizer.tokenize(A )
self.assertListEqual(A , A )
__magic_name__ = tokens + ['''<unk>''']
__magic_name__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def __A ( self , A=15 ) -> List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__magic_name__ = self.rust_tokenizer_class.from_pretrained(A , **A )
# Simple input
__magic_name__ = '''This is a simple input'''
__magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2''']
__magic_name__ = ('''This is a simple input''', '''This is a pair''')
__magic_name__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
# Pair input
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass | 678 |
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self , A , A , A = None , A = 5_02_57 , A = 10_24 , A = 7_68 , A = 12 , A = 12 , A = None , A = "gelu_new" , A = 0.1 , A = 0.1 , A = 0.1 , A = 1E-5 , A = 0.02 , A = True , A = True , A = False , A = False , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__magic_name__ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
F' `n_embd`: {n_embd} are not equal.' )
__magic_name__ = prefix_inner_dim
__magic_name__ = prefix_hidden_dim
__magic_name__ = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__magic_name__ = (
nn.Linear(self.prefix_hidden_dim , A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__magic_name__ = GPTaConfig(
vocab_size=A , n_positions=A , n_embd=A , n_layer=A , n_head=A , n_inner=A , activation_function=A , resid_pdrop=A , embd_pdrop=A , attn_pdrop=A , layer_norm_epsilon=A , initializer_range=A , scale_attn_weights=A , use_cache=A , scale_attn_by_inverse_layer_idx=A , reorder_and_upcast_attn=A , )
__magic_name__ = GPTaLMHeadModel(A )
def __A ( self , A , A , A = None , A = None , ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.transformer.transformer.wte(A )
__magic_name__ = self.encode_prefix(A )
__magic_name__ = self.decode_prefix(A )
__magic_name__ = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__magic_name__ = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__magic_name__ = torch.cat((dummy_token, input_ids) , dim=1 )
__magic_name__ = self.transformer(inputs_embeds=A , labels=A , attention_mask=A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __A ( self , A , A ) -> torch.Tensor:
'''simple docstring'''
return torch.zeros(A , self.prefix_length , dtype=torch.intaa , device=A )
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
return self.encode_prefix(A )
@torch.no_grad()
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = torch.split(A , 1 , dim=0 )
__magic_name__ = []
__magic_name__ = []
for feature in features:
__magic_name__ = self.decode_prefix(feature.to(A ) ) # back to the clip feature
# Only support beam search for now
__magic_name__ , __magic_name__ = self.generate_beam(
input_embeds=A , device=A , eos_token_id=A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__magic_name__ = torch.stack(A )
__magic_name__ = torch.stack(A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __A ( self , A=None , A=None , A=None , A = 5 , A = 67 , A = 1.0 , A = None , ) -> List[Any]:
'''simple docstring'''
__magic_name__ = eos_token_id
__magic_name__ = None
__magic_name__ = None
__magic_name__ = torch.ones(A , device=A , dtype=torch.int )
__magic_name__ = torch.zeros(A , device=A , dtype=torch.bool )
if input_embeds is not None:
__magic_name__ = input_embeds
else:
__magic_name__ = self.transformer.transformer.wte(A )
for i in range(A ):
__magic_name__ = self.transformer(inputs_embeds=A )
__magic_name__ = outputs.logits
__magic_name__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__magic_name__ = logits.softmax(-1 ).log()
if scores is None:
__magic_name__ , __magic_name__ = logits.topk(A , -1 )
__magic_name__ = generated.expand(A , *generated.shape[1:] )
__magic_name__ , __magic_name__ = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__magic_name__ = next_tokens
else:
__magic_name__ = tokens.expand(A , *tokens.shape[1:] )
__magic_name__ = torch.cat((tokens, next_tokens) , dim=1 )
else:
__magic_name__ = -float(np.inf )
__magic_name__ = 0
__magic_name__ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__magic_name__ = scores_sum / seq_lengths[:, None]
__magic_name__ , __magic_name__ = scores_sum_average.view(-1 ).topk(A , -1 )
__magic_name__ = next_tokens // scores_sum.shape[1]
__magic_name__ = seq_lengths[next_tokens_source]
__magic_name__ = next_tokens % scores_sum.shape[1]
__magic_name__ = next_tokens.unsqueeze(1 )
__magic_name__ = tokens[next_tokens_source]
__magic_name__ = torch.cat((tokens, next_tokens) , dim=1 )
__magic_name__ = generated[next_tokens_source]
__magic_name__ = scores_sum_average * seq_lengths
__magic_name__ = is_stopped[next_tokens_source]
__magic_name__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__magic_name__ = torch.cat((generated, next_token_embed) , dim=1 )
__magic_name__ = is_stopped + next_tokens.eq(A ).squeeze()
if is_stopped.all():
break
__magic_name__ = scores / seq_lengths
__magic_name__ = scores.argsort(descending=A )
# tokens tensors are already padded to max_seq_length
__magic_name__ = [tokens[i] for i in order]
__magic_name__ = torch.stack(A , dim=0 )
__magic_name__ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths | 678 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
__magic_name__ = requests.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
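# Full download URLs are presumably built by joining PREFIX with one of the
# suffixes above, e.g.
# 'https://openaipublic.azureedge.net/jukebox/models/5b/vqvae.pth.tar'.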
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
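# Illustrative input -> output pairs for the key-renaming function above,
# constructed by tracing its branches (not taken from a real checkpoint):
#   'prime_prior.transformer.weight' -> 'encoder.transformer.weight'
#   'prime_state_ln.bias'            -> 'encoder.final_layer_norm.bias'
#   'prior.x_out.weight'             -> 'prior.fc_proj_out.weight'
#   'y_emb.pos_emb'                  -> 'metadata_embedding.pos_emb'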
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
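    # Three pattern families, one per sub-module: encoder blocks (conv_in,
    # resnet, proj_out), decoder blocks (conv_out, resnet, proj_in) and the
    # prior's conditioner blocks. The dots in the patterns are unescaped, so
    # they match the literal '.' separators (and, strictly, any character).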
for original_key, value in state_dict.items():
        # rename vqvae encoder keys (conv_in, resnet and proj_out layers)
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a_ : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
a_ : int = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Optional[int] ):
# save results
if os.path.exists(snake_case_ ):
if os.path.exists(os.path.join(snake_case_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(snake_case_ , '''config.json''' ) ):
os.remove(os.path.join(snake_case_ , '''config.json''' ) )
if os.path.exists(os.path.join(snake_case_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(snake_case_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(snake_case_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(snake_case_ )
model.save_pretrained(snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple=False ):
__magic_name__ = 2
if unlogit:
__magic_name__ = torch.pow(snake_case_ , snake_case_ )
__magic_name__ = p * torch.log(snake_case_ )
__magic_name__ = 0
return -plogp.sum(dim=-1 )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
logger.info('''lv, h >\t''' + '''\t'''.join(f'{x + 1}' for x in range(len(snake_case_ ) ) ) )
for row in range(len(snake_case_ ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Any=True , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=None , snake_case_ : Dict=False ):
__magic_name__ , __magic_name__ = model.config.num_hidden_layers, model.config.num_attention_heads
__magic_name__ = torch.zeros(snake_case_ , snake_case_ ).to(args.device )
__magic_name__ = torch.zeros(snake_case_ , snake_case_ ).to(args.device )
if head_mask is None:
__magic_name__ = torch.ones(snake_case_ , snake_case_ ).to(args.device )
head_mask.requires_grad_(requires_grad=snake_case_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__magic_name__ = None
__magic_name__ = 0.0
__magic_name__ = 0.0
for step, inputs in enumerate(tqdm(snake_case_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
__magic_name__ = tuple(t.to(args.device ) for t in inputs )
((__magic_name__) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__magic_name__ = model(snake_case_ , labels=snake_case_ , head_mask=snake_case_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__magic_name__ , __magic_name__ , __magic_name__ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(snake_case_ ):
__magic_name__ = entropy(attn.detach() , snake_case_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(snake_case_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__magic_name__ = 2
__magic_name__ = torch.pow(torch.pow(snake_case_ , snake_case_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
__magic_name__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(snake_case_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(snake_case_ )
logger.info('''Head ranked by importance scores''' )
__magic_name__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__magic_name__ = torch.arange(
head_importance.numel() , device=args.device )
__magic_name__ = head_ranks.view_as(snake_case_ )
print_ad_tensor(snake_case_ )
return attn_entropy, head_importance, total_loss
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : Optional[int] ):
__magic_name__ , __magic_name__ , __magic_name__ = compute_heads_importance(snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ )
__magic_name__ = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , snake_case_ , original_score * args.masking_threshold )
__magic_name__ = torch.ones_like(snake_case_ )
__magic_name__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__magic_name__ = original_score
while current_score >= original_score * args.masking_threshold:
__magic_name__ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__magic_name__ = float('''Inf''' )
__magic_name__ = head_importance.view(-1 ).sort()[1]
if len(snake_case_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
__magic_name__ = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
__magic_name__ = new_head_mask.view(-1 )
__magic_name__ = 0.0
__magic_name__ = new_head_mask.view_as(snake_case_ )
__magic_name__ = new_head_mask.clone().detach()
print_ad_tensor(snake_case_ )
# Compute metric and head importance again
__magic_name__ , __magic_name__ , __magic_name__ = compute_heads_importance(
snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ , head_mask=snake_case_ )
__magic_name__ = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , snake_case_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(snake_case_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : str ):
__magic_name__ = datetime.now()
__magic_name__ , __magic_name__ , __magic_name__ = compute_heads_importance(
snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ , compute_importance=snake_case_ , head_mask=snake_case_ )
__magic_name__ = 1 / loss
__magic_name__ = datetime.now() - before_time
__magic_name__ = sum(p.numel() for p in model.parameters() )
__magic_name__ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(snake_case_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = [
v,
]
assert sum(len(snake_case_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(snake_case_ )
__magic_name__ = sum(p.numel() for p in model.parameters() )
__magic_name__ = datetime.now()
__magic_name__ , __magic_name__ , __magic_name__ = compute_heads_importance(
snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ , compute_importance=snake_case_ , head_mask=snake_case_ , actually_pruned=snake_case_ , )
__magic_name__ = 1 / loss
__magic_name__ = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , snake_case_ , snake_case_ , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , snake_case_ , snake_case_ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(snake_case_ , args.output_dir )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=snake_case_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=snake_case_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=snake_case_ , type=snake_case_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=snake_case_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=snake_case_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=snake_case_ , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=snake_case_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=snake_case_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=snake_case_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=42 )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=snake_case_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=snake_case_ , default='''''' , help='''Can be used for distant debugging.''' )
__magic_name__ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__magic_name__ = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
__magic_name__ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__magic_name__ = torch.device('''cuda''' , args.local_rank )
__magic_name__ = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__magic_name__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__magic_name__ = nn.parallel.DistributedDataParallel(
snake_case_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case_ )
elif args.n_gpu > 1:
__magic_name__ = nn.DataParallel(snake_case_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=snake_case_ )
torch.save(snake_case_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Prepare dataset
__magic_name__ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__magic_name__ = (torch.from_numpy(snake_case_ ),)
__magic_name__ = TensorDataset(*snake_case_ )
__magic_name__ = RandomSampler(snake_case_ )
__magic_name__ = DataLoader(snake_case_ , sampler=snake_case_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case_ , snake_case_ , snake_case_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__magic_name__ = mask_heads(snake_case_ , snake_case_ , snake_case_ )
prune_heads(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
return out | 678 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 678 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return sum((va - va) ** 2 for va, va in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 | 1 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if not isinstance(snake_case_ , snake_case_ ):
__magic_name__ = f'Input value of [number={number}] must be an integer'
raise TypeError(snake_case_ )
if number < 0:
return False
__magic_name__ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a_ : Union[str, Any] = get_logger(__name__)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A = None ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = (
os.path.join(A , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__magic_name__ = Extractor
def __A ( self , A ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__magic_name__ = os.path.abspath(A )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
    def __A ( self , A , force_extract ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(A ) and not (os.path.isdir(A ) and os.listdir(A ))
)
    def __A ( self , A , force_extract = False ) -> str:
        '''simple docstring'''
        __magic_name__ = self.extractor.infer_extractor_format(A )
        if not extractor_format:
            return A
        __magic_name__ = self._get_output_path(A )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(A , output_path , extractor_format )
return output_path
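# A minimal usage sketch for the extract manager defined above. The paths and the
# assumption that the class is importable as `ExtractManager` are hypothetical,
# not part of this module; `extract` returns the input path untouched when no
# archive format can be inferred from the file's leading bytes.
#
#     manager = ExtractManager(cache_dir="/tmp/datasets_cache")
#     extracted_dir = manager.extract("/tmp/archive.tar.gz")  # hashed path under the cache
#     plain_file = manager.extract("/tmp/notes.txt")          # returned unchanged, not an archive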
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@classmethod
@abstractmethod
    def __A ( cls , A , **kwargs ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
    def __A ( A , output_path ) -> None:
'''simple docstring'''
...
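# Every concrete extractor below follows the two-method contract declared above:
# `is_extractable` sniffs whether a path matches the format, and `extract`
# decompresses the archive into the given output location.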
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = []
@staticmethod
    def __A ( A , magic_number_length ) -> Optional[Any]:
        '''simple docstring'''
        with open(A , '''rb''' ) as f:
            return f.read(magic_number_length )
@classmethod
    def __A ( cls , A , magic_number = b"" ) -> bool:
        '''simple docstring'''
        if not magic_number:
            __magic_name__ = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                __magic_name__ = cls.read_magic_number(A , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
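# Sketch of the probe `is_extractable` performs above (the path is hypothetical):
# read at most max(len(m) for m in magic_numbers) leading bytes, then test each
# known prefix. For a gzip file the check reduces to:
#
#     with open("/tmp/sample.gz", "rb") as f:
#         assert f.read(2) == b"\x1F\x8B"  # the gzip magic number declared further below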
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@classmethod
    def __A ( cls , A , **kwargs ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(A )
@staticmethod
    def __A ( members , A ) -> str:
'''simple docstring'''
def resolved(A ) -> str:
return os.path.realpath(os.path.abspath(A ) )
        def badpath(A , base ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , A ) ).startswith(base )
        def badlink(info , base ) -> bool:
            # Links are interpreted relative to the directory containing the link
            __magic_name__ = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=base )
        __magic_name__ = resolved(A )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
    def __A ( A , output_path ) -> None:
        '''simple docstring'''
        os.makedirs(output_path , exist_ok=True )
        __magic_name__ = tarfile.open(A )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
tar_file.close()
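# Illustration of what `safemembers` above filters out, using hypothetical member
# names from a malicious tar archive; offending entries are logged and skipped
# rather than extracted:
#
#     "data/train.csv"     -> yielded, resolves inside the output directory
#     "../../etc/passwd"   -> blocked, escapes the output directory
#     symlink "x -> /etc"  -> blocked, its target escapes the output directory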
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [B"""\x1F\x8B"""]
@staticmethod
    def __A ( A , output_path ) -> None:
        '''simple docstring'''
        with gzip.open(A , '''rb''' ) as gzip_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
    def __A ( cls , A , magic_number = b"" ) -> bool:
        '''simple docstring'''
        if super().is_extractable(A , magic_number=magic_number ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(A , '''rb''' ) as fp:
                __magic_name__ = _EndRecData(fp )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            __magic_name__ = fp.read(sizeCentralDir ) # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                __magic_name__ = struct.unpack(structCentralDir , data ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __A ( A , A ) -> None:
'''simple docstring'''
os.makedirs(A , exist_ok=A )
with zipfile.ZipFile(A , '''r''' ) as zip_file:
zip_file.extractall(A )
zip_file.close()
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def __A ( A , A ) -> None:
'''simple docstring'''
with lzma.open(A ) as compressed_file:
with open(A , '''wb''' ) as extracted_file:
shutil.copyfileobj(A , A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def __A ( A , A ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(A , exist_ok=A )
__magic_name__ = rarfile.RarFile(A )
rf.extractall(A )
rf.close()
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def __A ( A , A ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
__magic_name__ = zstd.ZstdDecompressor()
with open(A , '''rb''' ) as ifh, open(A , '''wb''' ) as ofh:
dctx.copy_stream(A , A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [B"""\x42\x5A\x68"""]
@staticmethod
def __A ( A , A ) -> None:
'''simple docstring'''
with bza.open(A , '''rb''' ) as compressed_file:
with open(A , '''wb''' ) as extracted_file:
shutil.copyfileobj(A , A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def __A ( A , A ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import pyazr
os.makedirs(A , exist_ok=A )
with pyazr.SevenZipFile(A , '''r''' ) as archive:
archive.extractall(A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def __A ( A , A ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lza.frame
with lza.frame.open(A , '''rb''' ) as compressed_file:
with open(A , '''wb''' ) as extracted_file:
shutil.copyfileobj(A , A )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __A ( cls ) -> Optional[Any]:
'''simple docstring'''
return max(
len(A )
for extractor in cls.extractors.values()
if issubclass(A , A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __A ( A , A ) -> Optional[Any]:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(A , magic_number_length=A )
except OSError:
return b""
@classmethod
def __A ( cls , A , A = False ) -> bool:
'''simple docstring'''
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=A , )
__magic_name__ = cls.infer_extractor_format(A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __A ( cls , A ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
__magic_name__ = cls._get_magic_number_max_length()
__magic_name__ = cls._read_magic_number(A , A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(A , magic_number=A ):
return extractor_format
@classmethod
def __A ( cls , A , A , A = None , A = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(A ) , exist_ok=A )
# Prevent parallel extractions
__magic_name__ = str(Path(A ).with_suffix('''.lock''' ) )
with FileLock(A ):
shutil.rmtree(A , ignore_errors=A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(A , A ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=A , )
__magic_name__ = extractor if extractor != '''deprecated''' else extractor_format
else:
__magic_name__ = cls.extractors[extractor_format]
return extractor.extract(A , A )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(A ):
return extractor.extract(A , A ) | 678 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 678 | 1 |
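# A minimal usage sketch for the config above. It assumes the upstream name
# TableTransformerConfig from `transformers`; the mangled class names in this
# dump shadow one another, so the real identifier is used for illustration only.
from transformers import TableTransformerConfig

config = TableTransformerConfig(num_queries=50, encoder_layers=4)
# The attribute_map declared on the class aliases common names onto DETR-style ones.
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.encoder_attention_heads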
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
config = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(config , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(config , '''num_heads''' ) )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=64 , A=3 , A=[16, 48, 96] , A=[1, 3, 6] , A=[1, 2, 10] , A=[7, 3, 3] , A=[4, 2, 2] , A=[2, 1, 1] , A=[2, 2, 2] , A=[False, False, True] , A=[0.0, 0.0, 0.0] , A=0.02 , A=1E-12 , A=True , A=True , A=2 , ) -> List[Any]:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_sizes
__magic_name__ = patch_stride
__magic_name__ = patch_padding
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = num_labels
__magic_name__ = num_channels
__magic_name__ = embed_dim
__magic_name__ = num_heads
__magic_name__ = stride_kv
__magic_name__ = depth
__magic_name__ = cls_token
__magic_name__ = attention_drop_rate
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> str:
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = CvtModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A )
__magic_name__ = (self.image_size, self.image_size)
__magic_name__ , __magic_name__ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__magic_name__ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__magic_name__ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __A ( self , A , A , A ) -> str:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = CvtForImageClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_a = (
{"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = CvtModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def __A ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ) -> List[Any]:
'''simple docstring'''
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def __A ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(A )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(A , A , A ):
__magic_name__ = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(A , A ) )
__magic_name__ = outputs.hidden_states
__magic_name__ = len(self.model_tester.depth )
self.assertEqual(len(A ) , A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
check_hidden_states_output(A , A , A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = CvtModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self ) -> Any:
'''simple docstring'''
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=A , return_tensors='''pt''' ).to(A )
# forward pass
with torch.no_grad():
__magic_name__ = model(**A )
# verify the logits
__magic_name__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , A )
__magic_name__ = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) ) | 678 |
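# The per-stage sizes asserted in create_and_check_model above follow standard
# convolution arithmetic. A standalone check of that formula (the helper name is
# illustrative, not part of the test file):
from math import floor

def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # floor((size + 2 * padding - kernel) / stride) + 1
    return floor((size + 2 * padding - kernel) / stride) + 1

# Stage 0 of the tester defaults: image 64, kernel 7, stride 4, padding 2 -> 16.
assert conv_output_size(64, kernel=7, stride=4, padding=2) == 16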
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 678 | 1 |
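# Hypothetical invocation of the conversion script above (the script filename and
# paths are placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/lxmert/model.ckpt \
#       --config_file /path/to/lxmert/config.json \
#       --pytorch_dump_path /path/to/out/pytorch_model.bin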
class Node:
    def __init__( self , val ) -> None:
        self.val = val
        self.left = None
        self.right = None

    def insert( self , val ) -> None:
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val


def inorder(root , res ):
    # Recursive traversal
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )


def tree_sort(arr ):
    # Build BST
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13])) | 678 |
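# A few extra checks for tree_sort, written as a sketch rather than part of the
# original file. Note that the BST insert silently drops duplicate values, so the
# sort is only faithful for distinct inputs; average cost is O(n log n), degrading
# to O(n^2) on already-sorted input.
assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]
assert tree_sort([]) == []
assert tree_sort([5, 5, 5]) == [5]  # duplicates collapse to one node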
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 678 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[str]=0 ):
__magic_name__ = []
for old_item in old_list:
__magic_name__ = old_item.replace('''in_layers.0''' , '''norm1''' )
__magic_name__ = new_item.replace('''in_layers.2''' , '''conv1''' )
__magic_name__ = new_item.replace('''out_layers.0''' , '''norm2''' )
__magic_name__ = new_item.replace('''out_layers.3''' , '''conv2''' )
__magic_name__ = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
__magic_name__ = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
__magic_name__ = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int=0 ):
__magic_name__ = []
for old_item in old_list:
__magic_name__ = old_item
__magic_name__ = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
__magic_name__ = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
__magic_name__ = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
__magic_name__ = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
__magic_name__ = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : List[str]=None , snake_case_ : int=None , snake_case_ : Any=None ):
assert isinstance(snake_case_ , snake_case_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__magic_name__ = old_checkpoint[path]
__magic_name__ = old_tensor.shape[0] // 3
__magic_name__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__magic_name__ = old_tensor.shape[0] // config['''num_head_channels'''] // 3
__magic_name__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__magic_name__ , __magic_name__ , __magic_name__ = old_tensor.split(channels // num_heads , dim=1 )
__magic_name__ = query.reshape(snake_case_ )
__magic_name__ = key.reshape(snake_case_ )
__magic_name__ = value.reshape(snake_case_ )
for path in paths:
__magic_name__ = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__magic_name__ = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
__magic_name__ = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
__magic_name__ = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
__magic_name__ = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__magic_name__ = old_checkpoint[path['''old''']][:, :, 0]
else:
__magic_name__ = old_checkpoint[path['''old''']]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] ):
__magic_name__ = {}
__magic_name__ = checkpoint['''time_embed.0.weight''']
__magic_name__ = checkpoint['''time_embed.0.bias''']
__magic_name__ = checkpoint['''time_embed.2.weight''']
__magic_name__ = checkpoint['''time_embed.2.bias''']
__magic_name__ = checkpoint['''input_blocks.0.0.weight''']
__magic_name__ = checkpoint['''input_blocks.0.0.bias''']
__magic_name__ = checkpoint['''out.0.weight''']
__magic_name__ = checkpoint['''out.0.bias''']
__magic_name__ = checkpoint['''out.2.weight''']
__magic_name__ = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
__magic_name__ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
__magic_name__ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the middle blocks only
__magic_name__ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
__magic_name__ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the output blocks only
__magic_name__ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
__magic_name__ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(snake_case_ )
}
for i in range(1 , snake_case_ ):
__magic_name__ = (i - 1) // (config['''num_res_blocks'''] + 1)
__magic_name__ = (i - 1) % (config['''num_res_blocks'''] + 1)
__magic_name__ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
__magic_name__ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
__magic_name__ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
__magic_name__ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
__magic_name__ = renew_resnet_paths(snake_case_ )
__magic_name__ = {'''old''': f'input_blocks.{i}.0', '''new''': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
__magic_name__ = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path, resnet_op] , config=snake_case_ )
if len(snake_case_ ):
__magic_name__ = renew_attention_paths(snake_case_ )
__magic_name__ = {
'''old''': f'input_blocks.{i}.1',
'''new''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
__magic_name__ = {
f'input_blocks.{i}.1.qkv.bias': {
'''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case_ , config=snake_case_ , )
__magic_name__ = middle_blocks[0]
__magic_name__ = middle_blocks[1]
__magic_name__ = middle_blocks[2]
__magic_name__ = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
__magic_name__ = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
__magic_name__ = renew_attention_paths(snake_case_ )
__magic_name__ = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , attention_paths_to_split=snake_case_ , config=snake_case_ )
for i in range(snake_case_ ):
__magic_name__ = i // (config['''num_res_blocks'''] + 1)
__magic_name__ = i % (config['''num_res_blocks'''] + 1)
__magic_name__ = [shave_segments(snake_case_ , 2 ) for name in output_blocks[i]]
__magic_name__ = {}
for layer in output_block_layers:
__magic_name__ , __magic_name__ = layer.split('''.''' )[0], shave_segments(snake_case_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case_ )
else:
__magic_name__ = [layer_name]
if len(snake_case_ ) > 1:
__magic_name__ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
__magic_name__ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
__magic_name__ = renew_resnet_paths(snake_case_ )
__magic_name__ = renew_resnet_paths(snake_case_ )
__magic_name__ = {'''old''': f'output_blocks.{i}.0', '''new''': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__magic_name__ = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
__magic_name__ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
__magic_name__ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(snake_case_ ) == 2:
__magic_name__ = []
if len(snake_case_ ):
__magic_name__ = renew_attention_paths(snake_case_ )
__magic_name__ = {
'''old''': f'output_blocks.{i}.1',
'''new''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
__magic_name__ = {
f'output_blocks.{i}.1.qkv.bias': {
'''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=snake_case_ , )
else:
__magic_name__ = renew_resnet_paths(snake_case_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__magic_name__ = '''.'''.join(['''output_blocks''', str(snake_case_ ), path['''old''']] )
__magic_name__ = '''.'''.join(['''up_blocks''', str(snake_case_ ), '''resnets''', str(snake_case_ ), path['''new''']] )
__magic_name__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
a_ : Dict = parser.parse_args()
a_ : Any = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
a_ : str = json.loads(f.read())
a_ : Tuple = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
a_ : Optional[int] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
a_ : List[Any] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
a_ : str = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
a_ : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path) | 678 |
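# The qkv handling in assign_to_checkpoint reshapes a fused attention weight into
# per-head (query, key, value) chunks. A toy replay of that step with made-up
# sizes (num_head_channels=4, channels=8, so 2 heads; not taken from a real LDM
# checkpoint):
import torch

num_head_channels = 4
old_tensor = torch.randn(24, 8, 1)  # fused qkv from a 1x1 conv: (3 * channels, channels, 1)
channels = old_tensor.shape[0] // 3  # 8
num_heads = old_tensor.shape[0] // num_head_channels // 3  # 2
target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
fused = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
query, key, value = fused.split(channels // num_heads, dim=1)
assert query.reshape(target_shape).shape == (8, 8)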
def print_max_activities(start: list[int] , finish: list[int] ) -> None:
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 678 | 1 |
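# A variant of the same greedy rule that returns the selected indices instead of
# printing them (a sketch; like the original it assumes the activities are
# already sorted by finish time):
def select_max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always compatible
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected

assert select_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]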
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
a_ : Optional[Any] = parser.parse_args()
if args.model_type == "roberta":
a_ : Tuple = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ : List[str] = "roberta"
elif args.model_type == "gpt2":
a_ : Any = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ : List[str] = "transformer"
a_ : Tuple = model.state_dict()
a_ : Tuple = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ : Dict = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ : List[str] = F"""{prefix}.embeddings.{w}.weight"""
a_ : List[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
a_ : Optional[Any] = F"""{prefix}.embeddings.LayerNorm.{w}"""
a_ : Union[str, Any] = state_dict[param_name]
# Transformer Blocks #
a_ : List[Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ : Tuple = state_dict[
F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
a_ : List[str] = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ : Optional[Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ : Any = state_dict[F"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ : Dict = state_dict[F"""lm_head.dense.{w}"""]
a_ : int = state_dict[F"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ : Dict = state_dict[F"""{prefix}.ln_f.{w}"""]
a_ : Optional[Any] = state_dict["lm_head.weight"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 700 |
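# A quick sanity check one might run on the distilled checkpoint written above
# (the path is the script's default dump location; purely illustrative):
import torch

compressed_sd = torch.load('serialization_dir/tf_roberta_048131723.pth')
n_params = sum(t.numel() for t in compressed_sd.values())
print(f'{len(compressed_sd)} tensors kept, {n_params / 1e6:.1f}M parameters')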
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 0 |
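# scale_features / scale_to_features above are a min-max rescaling and its exact
# inverse. A standalone round-trip sketch using the same constants as the
# pipeline (min = log(1e-5), max = 4.0; function names are illustrative):
import math
import torch

MIN_VALUE, MAX_VALUE = math.log(1e-5), 4.0

def scale(features, out_range=(-1.0, 1.0)):
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)
    return zero_one * (out_range[1] - out_range[0]) + out_range[0]

def unscale(outputs, in_range=(-1.0, 1.0)):
    zero_one = (outputs - in_range[0]) / (in_range[1] - in_range[0])
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE

x = torch.linspace(MIN_VALUE, MAX_VALUE, 5)
assert torch.allclose(unscale(scale(x)), x, atol=1e-6)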
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class SCREAMING_SNAKE_CASE_ ( _snake_case ):
"""simple docstring"""
@slow
@require_torch
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__magic_name__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__magic_name__ = bertabert.config.encoder.vocab_size
__magic_name__ = tokenizer.sep_token_id
__magic_name__ = tokenizer.cls_token_id
__magic_name__ = 1_28
__magic_name__ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__magic_name__ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__magic_name__ = train_dataset.select(range(32 ) )
__magic_name__ = val_dataset.select(range(16 ) )
__magic_name__ = 4
def _map_to_encoder_decoder_inputs(A ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__magic_name__ = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=lowerCAmelCase__ , max_length=5_12 )
__magic_name__ = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=lowerCAmelCase__ , max_length=1_28 )
__magic_name__ = inputs.input_ids
__magic_name__ = inputs.attention_mask
__magic_name__ = outputs.input_ids
__magic_name__ = outputs.input_ids.copy()
__magic_name__ = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__magic_name__ = outputs.attention_mask
assert all(len(x ) == 5_12 for x in inputs.input_ids )
assert all(len(x ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(A ):
__magic_name__ = pred.label_ids
__magic_name__ = pred.predictions
# all unnecessary tokens are removed
__magic_name__ = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__magic_name__ = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__magic_name__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase__ ) )] ) / len(lowerCAmelCase__ )
return {"accuracy": accuracy}
# map train dataset
__magic_name__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__magic_name__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase__ , per_device_train_batch_size=lowerCAmelCase__ , per_device_eval_batch_size=lowerCAmelCase__ , predict_with_generate=lowerCAmelCase__ , evaluation_strategy='''steps''' , do_train=lowerCAmelCase__ , do_eval=lowerCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__magic_name__ = SeqaSeqTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
# start training
trainer.train() | 701 |
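# The label preparation in _map_to_encoder_decoder_inputs replaces pad token ids
# with -100 so that the cross-entropy loss ignores padding positions. A minimal
# sketch of that step (the pad_token_id value is illustrative):
pad_token_id = 0
labels = [[5, 8, 0, 0], [7, 0, 0, 0]]
masked = [[-100 if token == pad_token_id else token for token in row] for row in labels]
assert masked == [[5, 8, -100, -100], [7, -100, -100, -100]]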
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict , start: str ) -> set:
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A')) | 702 |
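# For contrast, a recursive formulation of the same traversal (a sketch; it
# visits neighbors in listed order, whereas the iterative version reverses them
# to mimic recursion, so only the resulting sets are guaranteed to match):
def depth_first_search_recursive(graph: dict, v: str, explored=None) -> set:
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored

assert depth_first_search_recursive(G, 'A') == depth_first_search(G, 'A')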
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 0 |
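# The rename pass above maps SimMIM/timm parameter names onto the transformers
# Swin layout. A few before/after pairs worked out by hand from the replacement
# rules (illustrative only; the shadowed definition names in this dump prevent
# calling the function directly):
#
#   encoder.patch_embed.proj.weight          -> swin.embeddings.patch_embeddings.projection.weight
#   encoder.layers.0.blocks.0.attn.proj.bias -> swin.encoder.layers.0.blocks.0.attention.output.dense.bias
#   encoder.norm.weight                      -> swin.layernorm.weight
#   decoder.0.weight                         -> decoder.0.weight (decoder keys pass through unchanged)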
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = XGLMConfig
_a = {}
_a = "gelu"
def __init__( self , A , A=14 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=0.02 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = d_model
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = ffn_dim
__magic_name__ = activation_function
__magic_name__ = activation_dropout
__magic_name__ = attention_dropout
__magic_name__ = max_position_embeddings
__magic_name__ = initializer_range
__magic_name__ = None
__magic_name__ = 0
__magic_name__ = 2
__magic_name__ = 1
    def get_large_model_config(self) -> Union[str, Any]:
        '''simple docstring'''
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self) -> Union[str, Any]:
        '''simple docstring'''
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self) -> int:
        '''simple docstring'''
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )

    def prepare_config_and_inputs_for_common(self) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self) -> Tuple:
        '''simple docstring'''
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config(self) -> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self) -> Dict:
        '''simple docstring'''
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def test_resize_token_embeddings(self) -> Any:
        '''simple docstring'''
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_lm_generate_xglm(self , verify_outputs=True ) -> List[str]:
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
    @slow
    def test_xglm_sample(self) -> Optional[Any]:
        '''simple docstring'''
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        tokenized = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
    @slow
    def test_batch_generation(self) -> Tuple:
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        tokenizer.padding_side = '''left'''
        # use different length sentences to test batching
        sentences = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        inputs = tokenizer(sentences , return_tensors='''tf''' , padding=True )
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
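# Example (assuming the adjacent words.txt dictionary is available):
#   signature("python") -> "hnopty"
#   anagram("post")     -> e.g. ["opts", "post", "pots", "spot", "stop", "tops"]
# The exact anagram list depends on the contents of words.txt.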
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path

    print(f'Load fine-pruned model from {model_name_or_path}' )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            else:
                raise ValueError('''Unknown pruning method''' )

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'bertarized_{os.path.basename(model_name_or_path )}' )

    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'\nCreated folder {target_model_path}' )

    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
    main(args)
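    # Illustrative invocation (the script filename and paths below are assumptions
    # for demonstration, not part of this file):
    #   python bertarize.py \
    #       --pruning_method sigmoied_threshold \
    #       --threshold 0.1 \
    #       --model_name_or_path /path/to/fine-pruned-model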
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""
    def __init__( self , list_of_points ) -> Tuple:
        '''simple docstring'''
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1

    def basis_function(self , t ) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values

    def bezier_curve_function(self , t ) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self , step_size = 0.01 ) -> Tuple:
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
        plt.scatter(x , y , color='''red''' , label='''Control Points''' )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
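    # The curve can also be evaluated without plotting; for the degree-1 curve
    # above, t = 0.5 is simply the midpoint of the two control points:
    #   BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) -> (2.0, 3.5)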
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(re.search(pattern , phone ) )


if __name__ == "__main__":
    phone = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
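    # A few more examples of what the pattern above accepts and rejects:
    #   is_sri_lankan_phone_number("+94773283048")  -> True
    #   is_sri_lankan_phone_number("0718382399")    -> True
    #   is_sri_lankan_phone_number("0912343221")    -> False (must continue with 7
    #   after the 0/94/+94/0094 prefix)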
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = """nat"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ) -> Tuple:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
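# Illustrative usage (the values below mirror the defaults above; any other
# combination would be an assumption for demonstration):
#   config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
#   config.hidden_size  # 512 == 64 * 2 ** 3, the channel dim after the last stage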
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class CheckDummiesTester(unittest.TestCase ):
"""simple docstring"""
    def test_find_backend(self) -> Union[str, Any]:
        '''simple docstring'''
        simple_backend = find_backend('''    if not is_torch_available():''' )
        self.assertEqual(simple_backend , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend('''    if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(double_backend , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            '''    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(triple_backend , '''torch_and_transformers_and_onnx''' )
    def test_read_init(self) -> str:
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''torch_and_transformers''' , objects )
        self.assertIn('''flax_and_transformers''' , objects )
        self.assertIn('''torch_and_transformers_and_onnx''' , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''' , objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
    def test_create_dummy_object(self) -> Optional[int]:
        '''simple docstring'''
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )
        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files(self) -> int:
        '''simple docstring'''
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
from __future__ import annotations
def simple_interest(principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest(principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
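# Worked examples for the three helpers above:
#   simple_interest(10_000, 0.05, 3)   -> 1500.0   (10000 * 0.05 * 3)
#   compound_interest(10_000, 0.05, 3) -> 1576.25  (10000 * (1.05 ** 3 - 1))
#   apr_interest(10_000, 0.05, 3)      -> compound interest at 0.05 / 365 per day
#                                          over 3 * 365 daily periods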
if __name__ == "__main__":
import doctest
    doctest.testmod()
def depth_first_search(grid : list[list[int]] , row : int , col : int , visit : set ) -> int:
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col) )

    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )

    visit.remove((row, col) )
return count
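# Example: a fully open 2x2 grid has exactly two simple paths from the top-left
# to the bottom-right corner (right-then-down and down-then-right):
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set()) -> 2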
if __name__ == "__main__":
import doctest
    doctest.testmod()
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
"""simple docstring"""
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ) -> int:
        '''simple docstring'''
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit(self , hidden , weight , bias , proj ) -> List[str]:
        '''simple docstring'''
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self , hidden , labels=None , keep_order=False ) -> str:
        '''simple docstring'''
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -1_00
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob(self , hidden ) -> Tuple:
        '''simple docstring'''
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
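# Minimal usage sketch (the dimensions and cutoffs below are illustrative
# assumptions, not tied to any particular checkpoint):
#   crit = ProjectedAdaptiveLogSoftmax(n_token=10_000, d_embed=128, d_proj=128, cutoffs=[1_000, 5_000])
#   hidden = torch.randn(4, 16, 128)            # (batch, seq_len, d_proj)
#   labels = torch.randint(0, 10_000, (4, 16))  # next-token targets
#   nll = crit(hidden, labels)                  # flattened per-position negative log-likelihoods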
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value : float , from_type : str , to_type : str ) -> float:
    from_sanitized = from_type.lower().strip('''s''' )
    to_sanitized = to_type.lower().strip('''s''' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'from_type\' value: {from_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'to_type\' value: {to_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
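# Examples of the exponent arithmetic above:
#   length_conversion(4, "meter", "kilometer")  -> 0.004   (4 * 10 ** -3)
#   length_conversion(1, "kilometer", "meter")  -> 1000.0  (1 * 10 ** 3)
#   length_conversion(3, "km", "Mm")            -> 0.003   (symbols work too)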
if __name__ == "__main__":
from doctest import testmod
    testmod()
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )

    def prepare_config_and_inputs(self) -> str:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self) -> Any:
        '''simple docstring'''
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def create_and_check_model(self , config , pixel_values , labels ) -> Optional[int]:
        '''simple docstring'''
        model = TFRegNetModel(config=config )
        result = model(pixel_values , training=False )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification(self , config , pixel_values , labels ) -> int:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> List[str]:
        '''simple docstring'''
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )

    def create_and_test_config_common_properties(self) -> Dict:
        '''simple docstring'''
        return

    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds(self) -> Union[str, Any]:
        '''simple docstring'''
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    @slow
    def test_keras_fit(self) -> Union[str, Any]:
        '''simple docstring'''
        super().test_keras_fit()

    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes(self) -> Optional[int]:
        '''simple docstring'''
        pass
    def test_forward_signature(self) -> str:
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output(self) -> Union[str, Any]:
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_model_outputs_equivalence(self) -> Optional[Any]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            tuple_output = model(tuple_inputs , return_dict=False , **additional_kwargs )
            dict_output = model(dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object , dict_object ) ) , msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
                        ) , )

            recursive_check(tuple_output , dict_output )

        for model_class in self.all_model_classes:
            model = model_class(config )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
    def test_for_image_classification(self) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self) -> Dict:
        '''simple docstring'''
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor(self) -> Optional[Any]:
        '''simple docstring'''
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self) -> Any:
        '''simple docstring'''
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ) -> int:
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self) -> Optional[int]:
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self , image_inputs , batched=False ) -> Dict:
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
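    # Example of the shortest-edge rule above: a 400 x 600 (w x h) image with
    # size={"shortest_edge": 18} resizes so that the short side becomes 18,
    # giving expected (height, width) = (27, 18) and preserving the 2:3 ratio.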
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self) -> List[Any]:
        '''simple docstring'''
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )

    @property
    def image_processor_dict(self) -> Dict:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self) -> Optional[Any]:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_rescale''' ) )
        self.assertTrue(hasattr(image_processing , '''do_pad''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs(self) -> int:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature(self) -> str:
'''simple docstring'''
pass
    def test_call_pil(self) -> List[str]:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
__magic_name__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
__magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self) -> Any:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
__magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__magic_name__ = json.loads(f.read() )
__magic_name__ = {"image_id": 3_97_69, "annotations": target}
# encode them
__magic_name__ = DeformableDetrImageProcessor()
__magic_name__ = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
__magic_name__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
__magic_name__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1E-4 ) )
# verify area
__magic_name__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
__magic_name__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
__magic_name__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1E-3 ) )
# verify image_id
__magic_name__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
__magic_name__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
__magic_name__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
__magic_name__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
__magic_name__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self) -> Optional[int]:
'''simple docstring'''
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__magic_name__ = json.loads(f.read() )
__magic_name__ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
__magic_name__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__magic_name__ = DeformableDetrImageProcessor(format='''coco_panoptic''' )
__magic_name__ = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
__magic_name__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
__magic_name__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1E-4 ) )
# verify area
__magic_name__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
__magic_name__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
__magic_name__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1E-3 ) )
# verify image_id
__magic_name__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
__magic_name__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
__magic_name__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
__magic_name__ = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
__magic_name__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self) -> str:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self , config , input_ids , head_mask , token_type_ids , *args ) -> Tuple:
        '''simple docstring'''
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head_model(self , config , input_ids , head_mask , token_type_ids , *args ) -> Dict:
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_double_lm_head_model(self , config , input_ids , head_mask , token_type_ids , *args ) -> List[Any]:
        '''simple docstring'''
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_openai_gpt_for_sequence_classification(self , config , input_ids , head_mask , token_type_ids , *args ) -> Optional[int]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            """feature-extraction""": OpenAIGPTModel,
            """text-classification""": OpenAIGPTForSequenceClassification,
            """text-generation""": OpenAIGPTLMHeadModel,
            """zero-shot""": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> List[str]:
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
a_ : int = TypeVar('T')
a_ : Any = TypeVar('U')
class SCREAMING_SNAKE_CASE_ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self , A , A ) -> int:
'''simple docstring'''
__magic_name__ = key
__magic_name__ = val
__magic_name__ = None
__magic_name__ = None
def __repr__( self ) -> Optional[Any]:
'''simple docstring'''
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class SCREAMING_SNAKE_CASE_ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = DoubleLinkedListNode(__A , __A )
__magic_name__ = DoubleLinkedListNode(__A , __A )
__magic_name__ = self.rear, self.head
def __repr__( self ) -> int:
'''simple docstring'''
__magic_name__ = ["DoubleLinkedList"]
__magic_name__ = self.head
while node.next is not None:
rep.append(str(__A ) )
__magic_name__ = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__A )
def __A ( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__magic_name__ = node
__magic_name__ = previous
__magic_name__ = node
__magic_name__ = self.rear
def __A ( self , A ) -> Dict:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__magic_name__ = node.next
__magic_name__ = node.prev
__magic_name__ = None
__magic_name__ = None
return node
class SCREAMING_SNAKE_CASE_ ( Generic[T, U] ):
"""simple docstring"""
_a = {}
def __init__( self , A ) -> str:
'''simple docstring'''
__magic_name__ = DoubleLinkedList()
__magic_name__ = capacity
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = {}
def __repr__( self ) -> Optional[int]:
'''simple docstring'''
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , A ) -> Union[str, Any]:
'''simple docstring'''
return key in self.cache
def __A ( self , A ) -> str:
'''simple docstring'''
if key in self.cache:
self.hits += 1
__magic_name__ = self.cache[key]
__magic_name__ = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__A )
return node.val
self.miss += 1
return None
def __A ( self , A , A ) -> str:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__magic_name__ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__A ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
__magic_name__ = DoubleLinkedListNode(__A , __A )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__magic_name__ = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__magic_name__ = value
self.list.add(__A )
@classmethod
def __A ( cls , A = 1_28 ) -> Dict:
'''simple docstring'''
def cache_decorator_inner(A ) -> Callable[..., U]:
def cache_decorator_wrapper(*A ) -> U:
if func not in cls.decorator_function_to_instance_map:
__magic_name__ = LRUCache(__A )
__magic_name__ = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__magic_name__ = func(*__A )
cls.decorator_function_to_instance_map[func].put(args[0] , __A )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__A , '''cache_info''' , __A ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod() | 711 |
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a_ : int = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a_ : List[Any] = TaTokenizerFast
a_ : Dict = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a_ : Tuple = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 712 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
__magic_name__ = requests.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 0 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
return 1.0 / (1.0 + np.exp(-_outputs ))
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = np.max(_outputs , axis=-1 , keepdims=__UpperCAmelCase )
__magic_name__ = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__UpperCAmelCase )
class SCREAMING_SNAKE_CASE_ ( _snake_case ):
"""simple docstring"""
_a = 'sigmoid'
_a = 'softmax'
_a = 'none'
@add_end_docstrings(
_snake_case , r"""\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n """ , )
class SCREAMING_SNAKE_CASE_ ( _snake_case ):
"""simple docstring"""
_a = False
_a = ClassificationFunction.NONE
def __init__( self , **A ) -> Tuple:
'''simple docstring'''
super().__init__(**A )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __A ( self , A=None , A=None , A="" , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = tokenizer_kwargs
__magic_name__ = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
__magic_name__ = self.model.config.return_all_scores
if isinstance(A , A ) or top_k is None:
__magic_name__ = top_k
__magic_name__ = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , A , )
if return_all_scores:
__magic_name__ = None
else:
__magic_name__ = 1
if isinstance(A , A ):
__magic_name__ = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__magic_name__ = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *A , **A ) -> Tuple:
'''simple docstring'''
__magic_name__ = super().__call__(*A , **A )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__magic_name__ = '''top_k''' not in kwargs
if isinstance(args[0] , A ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __A ( self , A , **A ) -> Any:
'''simple docstring'''
__magic_name__ = self.framework
if isinstance(A , A ):
return self.tokenizer(**A , return_tensors=A , **A )
elif isinstance(A , A ) and len(A ) == 1 and isinstance(inputs[0] , A ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=A , **A )
elif isinstance(A , A ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.''' )
return self.tokenizer(A , return_tensors=A , **A )
def __A ( self , A ) -> str:
'''simple docstring'''
return self.model(**A )
def __A ( self , A , A=None , A=1 , A=True ) -> Optional[int]:
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__magic_name__ = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__magic_name__ = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
__magic_name__ = self.model.config.function_to_apply
else:
__magic_name__ = ClassificationFunction.NONE
__magic_name__ = model_outputs['''logits'''][0]
__magic_name__ = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__magic_name__ = sigmoid(A )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__magic_name__ = softmax(A )
elif function_to_apply == ClassificationFunction.NONE:
__magic_name__ = outputs
else:
raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__magic_name__ = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(A )
]
if not _legacy:
dict_scores.sort(key=lambda A : x["score"] , reverse=A )
if top_k is not None:
__magic_name__ = dict_scores[:top_k]
return dict_scores | 713 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 0 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
a_ : Union[str, Any] = input('Enter image url: ').strip()
print(F"""Downloading image from {url} ...""")
a_ : int = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
a_ : str = soup.find('meta', {'property': 'og:image'})['content']
a_ : Optional[Any] = requests.get(image_url).content
a_ : List[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""") | 714 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 0 |
import itertools
import math
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = 2
while True:
if is_prime(__A ):
yield num
num += 1
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any = 1_0001 ):
return next(itertools.islice(prime_generator() , nth - 1 , __A ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 715 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
return out | 678 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( a__ , unittest.TestCase ):
"""simple docstring"""
_a = LDMTextToImagePipeline
_a = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
_a = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = False
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__magic_name__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__magic_name__ = CLIPTextModel(_A )
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__magic_name__ = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __A ( self , A , A=0 ) -> Tuple:
'''simple docstring'''
if str(_A ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(_A )
else:
__magic_name__ = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ = self.get_dummy_inputs(_A )
__magic_name__ = pipe(**_A ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__magic_name__ = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , A , A=torch.floataa , A=0 ) -> Tuple:
'''simple docstring'''
__magic_name__ = torch.manual_seed(_A )
__magic_name__ = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
__magic_name__ = torch.from_numpy(_A ).to(device=_A , dtype=_A )
__magic_name__ = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ = self.get_inputs(_A )
__magic_name__ = pipe(**_A ).images
__magic_name__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_56, 2_56, 3)
__magic_name__ = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] )
__magic_name__ = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , A , A=torch.floataa , A=0 ) -> Tuple:
'''simple docstring'''
__magic_name__ = torch.manual_seed(_A )
__magic_name__ = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
__magic_name__ = torch.from_numpy(_A ).to(device=_A , dtype=_A )
__magic_name__ = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ = self.get_inputs(_A )
__magic_name__ = pipe(**_A ).images[0]
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
__magic_name__ = np.abs(expected_image - image ).max()
assert max_diff < 1E-3 | 716 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return sum((va - va) ** 2 for va, va in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
    def __A ( self ) -> Union[str, Any]:
        '''simple docstring'''
        # Disable the interactive confirmation timeout used when loading
        # tokenizers that ship custom (remote) code, so tests run unattended.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def __A ( self ) -> List[Any]:
        '''simple docstring'''
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
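    # For reference, `AutoTokenizer.from_pretrained` is the standard entry point
    # exercised above. A minimal usage sketch (illustrative only, not part of
    # this suite; the checkpoint name is just a public example):
    #
    #   from transformers import AutoTokenizer
    #   tok = AutoTokenizer.from_pretrained("bert-base-cased")
    #   print(tok("Hello, world!")["input_ids"])  # token ids, special tokens included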
    def __A ( self ) -> List[str]:
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def __A ( self ) -> List[Any]:
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
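    # Dispatch works off the checkpoint's config.json: its `model_type` field
    # selects the (slow, fast) pair registered in TOKENIZER_MAPPING. A sketch of
    # making that resolution explicit (the path is an assumption):
    #
    #   from transformers import AutoConfig, AutoTokenizer
    #   config = AutoConfig.from_pretrained("path/to/checkpoint")
    #   tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint", config=config)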
    def __A ( self ) -> List[Any]:
        '''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
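    # Note: a `tokenizer_class` entry in the config takes precedence over the
    # class implied by `model_type`, which is exactly what the dummy checkpoint
    # above exercises (a RoBERTa config that requests a BERT tokenizer).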
    def __A ( self ) -> int:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' , use_fast=False )
            self.assertIsInstance(tokenizer , BertTokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' , use_fast=False )
            self.assertIsInstance(tokenizer , GPTaTokenizer )
@require_tokenizers
def __A ( self ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __A ( self ) -> Dict:
'''simple docstring'''
with pytest.raises(__lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def __A ( self ) -> Any:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__magic_name__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def __A ( self ) -> str:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
__magic_name__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = TOKENIZER_MAPPING.values()
__magic_name__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCamelCase )
@require_tokenizers
def __A ( self ) -> int:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowerCamelCase ) , __lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowerCamelCase )
@require_tokenizers
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowerCamelCase )
__magic_name__ = "Hello, world. How are you?"
__magic_name__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
__magic_name__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowerCamelCase )
__magic_name__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = get_tokenizer_config('''bert-base-cased''' )
__magic_name__ = config.pop('''_commit_hash''' , __lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__magic_name__ = get_tokenizer_config(__lowerCamelCase )
self.assertDictEqual(__lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = get_tokenizer_config(__lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
__magic_name__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __A ( self ) -> Tuple:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __A ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaises(__lowerCamelCase ):
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def __A ( self ) -> Any:
'''simple docstring'''
class SCREAMING_SNAKE_CASE_ ( _A ):
"""simple docstring"""
_a = False
class SCREAMING_SNAKE_CASE_ ( _A ):
"""simple docstring"""
_a = NewTokenizer
_a = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__magic_name__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __A ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
__magic_name__ = AutoTokenizer.from_pretrained('''bert-base''' )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__magic_name__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 717 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Any = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase ):
"""simple docstring"""
_a = """t5"""
_a = ["""past_key_values"""]
_a = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , A=3_21_28 , A=5_12 , A=64 , A=20_48 , A=6 , A=None , A=8 , A=32 , A=1_28 , A=0.1 , A=1E-6 , A=1.0 , A="relu" , A=True , A=True , A=0 , A=1 , **A , ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = vocab_size
__magic_name__ = d_model
__magic_name__ = d_kv
__magic_name__ = d_ff
__magic_name__ = num_layers
__magic_name__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__magic_name__ = num_heads
__magic_name__ = relative_attention_num_buckets
__magic_name__ = relative_attention_max_distance
__magic_name__ = dropout_rate
__magic_name__ = layer_norm_epsilon
__magic_name__ = initializer_factor
__magic_name__ = feed_forward_proj
__magic_name__ = use_cache
__magic_name__ = self.feed_forward_proj.split('''-''' )
__magic_name__ = act_info[-1]
__magic_name__ = act_info[0] == '''gated'''
if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__magic_name__ = '''gelu_new'''
super().__init__(
pad_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ , )
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase ):
"""simple docstring"""
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__magic_name__ = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
__magic_name__ = '''past_encoder_sequence + sequence'''
__magic_name__ = {0: '''batch'''}
__magic_name__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__magic_name__ = {0: '''batch''', 1: '''decoder_sequence'''}
__magic_name__ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
return common_inputs
@property
def __A ( self ) -> int:
'''simple docstring'''
return 13 | 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 678 | 0 |
a_ : List[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
a_ : Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a_ : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
} | 719 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 678 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Tuple:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = 13
__magic_name__ = 7
__magic_name__ = True
__magic_name__ = True
__magic_name__ = True
__magic_name__ = True
__magic_name__ = 99
__magic_name__ = 3_84
__magic_name__ = 2
__magic_name__ = 4
__magic_name__ = 37
__magic_name__ = '''gelu'''
__magic_name__ = 0.1
__magic_name__ = 0.1
__magic_name__ = 5_12
__magic_name__ = 16
__magic_name__ = 2
__magic_name__ = 0.02
__magic_name__ = 3
__magic_name__ = 4
__magic_name__ = 1_28
__magic_name__ = 2
__magic_name__ = 9
__magic_name__ = 1
__magic_name__ = None
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self , A , A , A , A , A , A , A ) -> int:
'''simple docstring'''
__magic_name__ = TFConvBertModel(config=snake_case_ )
__magic_name__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ = [input_ids, input_mask]
__magic_name__ = model(snake_case_ )
__magic_name__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A ) -> int:
'''simple docstring'''
__magic_name__ = TFConvBertForMaskedLM(config=snake_case_ )
__magic_name__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = TFConvBertForSequenceClassification(config=snake_case_ )
__magic_name__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.num_choices
__magic_name__ = TFConvBertForMultipleChoice(config=snake_case_ )
__magic_name__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__magic_name__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__magic_name__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__magic_name__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = TFConvBertForTokenClassification(config=snake_case_ )
__magic_name__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = TFConvBertForQuestionAnswering(config=snake_case_ )
__magic_name__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_a = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_a = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_a = False
_a = False
_a = False
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = TFConvBertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def __A ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = True
__magic_name__ = True
if hasattr(snake_case_ , '''use_cache''' ):
__magic_name__ = True
__magic_name__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__magic_name__ = getattr(self.model_tester , '''key_length''' , snake_case_ )
for model_class in self.all_model_classes:
__magic_name__ = self._prepare_for_class(snake_case_ , snake_case_ )
__magic_name__ = model_class(snake_case_ )
__magic_name__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''saved_model''' , '''1''' )
__magic_name__ = tf.keras.models.load_model(snake_case_ )
__magic_name__ = model(snake_case_ )
if self.is_encoder_decoder:
__magic_name__ = outputs['''encoder_hidden_states''']
__magic_name__ = outputs['''encoder_attentions''']
else:
__magic_name__ = outputs['''hidden_states''']
__magic_name__ = outputs['''attentions''']
self.assertEqual(len(snake_case_ ) , snake_case_ )
__magic_name__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(snake_case_ )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = True
__magic_name__ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
__magic_name__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__magic_name__ = getattr(self.model_tester , '''key_length''' , snake_case_ )
__magic_name__ = getattr(self.model_tester , '''key_length''' , snake_case_ )
def check_decoder_attentions_output(A ):
__magic_name__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(A ):
__magic_name__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__magic_name__ = True
__magic_name__ = False
__magic_name__ = model_class(snake_case_ )
__magic_name__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
__magic_name__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
__magic_name__ = model_class(snake_case_ )
__magic_name__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ = True
__magic_name__ = model_class(snake_case_ )
__magic_name__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
__magic_name__ = True
__magic_name__ = True
__magic_name__ = model_class(snake_case_ )
__magic_name__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
__magic_name__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ = model(snake_case_ )[0]
__magic_name__ = [1, 6, 7_68]
self.assertEqual(output.shape , snake_case_ )
__magic_name__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 ) | 720 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 678 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
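
# Pix2Struct does not resize inputs to a fixed square; it cuts each image into
# fixed-size patches, flattens every patch, and prepends its (row, column)
# position, which is why the tests below check a hidden dimension of
# patch_height * patch_width * channels + 2.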
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
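
# For reference, a minimal standalone usage sketch (not part of the test suite;
# it assumes the "google/pix2struct-base" checkpoint and a PIL `image` are
# available in scope):
#
#     from transformers import Pix2StructImageProcessor
#
#     processor = Pix2StructImageProcessor.from_pretrained("google/pix2struct-base")
#     inputs = processor(images=image, return_tensors="pt", max_patches=2048)
#     inputs.flattened_patches  # (batch, max_patches, 16 * 16 * 3 + 2) with default patch size
#     inputs.attention_mask     # (batch, max_patches); 0 marks padded patch slots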
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
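
    # The per-format tests below all assert the same contract: each flattened
    # patch carries patch_height * patch_width * num_channels values plus 2
    # slots for its row/column index, and the leading dimension matches the
    # number of input images.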
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
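
    # In VQA mode the processor renders `header_text` onto the image before
    # extracting patches, so calling it without a header is expected to raise.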
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Calling without a header in VQA mode should fail
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
__magic_name__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ = image_processor(
_lowercase , return_tensors='''pt''' , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = PixaStructImageProcessor if is_vision_available() else None
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = PixaStructImageProcessingTester(self , num_channels=4 )
__magic_name__ = 3
@property
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''do_convert_rgb''' ) )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
__magic_name__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ = image_processor(
_lowercase , return_tensors='''pt''' , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) | 721 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: prints a maximum-size set of mutually
    non-overlapping activities, assuming ``finish`` is sorted ascending."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
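# Worked example (a sketch, not part of the original script): with the inputs
# above, the greedy scan selects activities 0, 1, 3 and 4, printing "0,1,3,4,".
# Correctness relies on `finish` already being sorted in ascending order.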
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    r"""
    Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
    processor.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
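# Minimal usage sketch (the checkpoint id and the 16 kHz `waveform` array are
# assumptions for illustration, not part of this file):
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcription", return_tensors="pt").input_ids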
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or"
                " set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define"
                " `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
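# Minimal usage sketch (the checkpoint id is an assumption for illustration;
# `note_token_chunks` would come from a MIDI note-sequence feature converter):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   audio = pipe(note_token_chunks, num_inference_steps=100).audios[0]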
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map a config string to the corresponding activation module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
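# Example: pick an activation from a config string; any name outside the
# three branches above raises ValueError.
#
#   act = get_activation("silu")
#   y = act(torch.randn(2, 3))  # requires `import torch`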
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into separate query / key / value tensors
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
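# Example invocation (script name and paths are placeholders, not part of
# the original file):
#
#   python convert_swin_simmim.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path /path/to/simmim_pretrain.pth \
#       --pytorch_dump_folder_path ./swin-simmim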
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
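# Round-trip sketch: decrypting an encryption with the same key recovers the
# original text (non-letters pass through unchanged and do not advance the key):
#
#   assert decrypt_message("LION", encrypt_message("LION", "Attack at dawn!")) == "Attack at dawn!"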
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters sorted; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of ``my_word``."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
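# Example lookup (actual results depend on the contents of words.txt):
#
#   anagram("post")  # e.g. ["opts", "post", "pots", "spot", "stop", "tops"]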
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined over a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
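# The basis functions above are the Bernstein polynomials
# b_{i,n}(t) = C(n, i) * (1 - t)**(n - i) * t**i; they are non-negative and
# sum to 1 on [0, 1], so each curve point is a convex combination of the
# control points and the curve stays inside their convex hull.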
"""Speech processor class for Wav2Vec2."""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
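# Examples checked against the pattern above:
#
#   is_sri_lankan_phone_number("+94773283048")  # True  (mobile prefix 77)
#   is_sri_lankan_phone_number("0112343221")    # False (11 is not a mobile prefix)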
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the CNN/DailyMail dataset: one story file per example."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate the sequence to ``block_size``, or pad it with ``pad_token_id``."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Build an attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into token id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate segment embeddings (0/1) for each sentence, switching at separators."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
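# Quick sketch of the padding helpers (pad_token_id=0 is an arbitrary choice):
#
#   fit_to_block_size([5, 6, 7], block_size=5, pad_token_id=0)   # [5, 6, 7, 0, 0]
#   build_mask(torch.tensor([5, 6, 7, 0, 0]), pad_token_id=0)    # tensor([1, 1, 1, 0, 0])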
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection``; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive variant; ``left`` and ``right`` bound the search range."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if ``collection`` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
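# Unlike binary search's fixed midpoint, the probe position above is
# left + (item - A[left]) * (right - left) // (A[right] - A[left]),
# which lands close to the target when keys are roughly uniformly distributed,
# giving O(log log n) comparisons on average (O(n) in the worst case).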
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple 4-directional paths from (row, col) to the bottom-right cell,
    avoiding cells marked 1 and never revisiting a cell on the current path."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
    doctest.testmod()
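# Example (a sketch): a 2x2 grid of zeros has exactly two simple paths from
# (0, 0) to (1, 1), one around each corner:
#
#   depth_first_search([[0, 0], [0, 0]], row=0, col=0, visit=set())  # -> 2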
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units (e.g. 'kilometer' -> 'meter')."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
    testmod()
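# Worked example (`length_conversion` is the name chosen above for the fixed
# conversion function):
#
#   length_conversion(4.0, "kilometer", "meter")  # 4.0 * 10**(3 - 0) = 4000.0
#   length_conversion(1.0, "meter", "kilometer")  # 1.0 * 10**-3      = 0.001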
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
    doctest.testmod()
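# Example: maximum_non_adjacent_sum([1, 2, 4, 5, 9, 10]) returns 17,
# obtained by picking the non-adjacent elements 2, 5 and 10.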
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 0 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] = None , snake_case_ : str = None ):
if start is None:
__magic_name__ = 0
if end is None:
__magic_name__ = len(_snake_case ) - 1
if start >= end:
return
__magic_name__ = (start + end) // 2
slowsort(_snake_case , _snake_case , _snake_case )
slowsort(_snake_case , mid + 1 , _snake_case )
if sequence[end] < sequence[mid]:
__magic_name__ , __magic_name__ = sequence[mid], sequence[end]
slowsort(_snake_case , _snake_case , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod() | 711 |
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 0 |
import os
import sys
import unittest
a_ : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : str = os.path.join(git_repo_path, 'src', 'transformers')
a_ : Optional[int] = '\n{0} = None\n'
a_ : Any = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
a_ : Dict = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(UpperCamelCase__ )
__magic_name__ = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(UpperCamelCase__ , '''tokenizers''' )
__magic_name__ = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(UpperCamelCase__ , '''tensorflow_text''' )
__magic_name__ = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers''' )
__magic_name__ = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tensorflow_text''' )
__magic_name__ = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , UpperCamelCase__ )
self.assertIn('''tensorflow_text''' , UpperCamelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
UpperCamelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , UpperCamelCase__ ) | 712 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
__magic_name__ = requests.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a_ : Optional[Any] = '<<<<<<< This should probably be modified because it mentions: '
a_ : Any = '=======\n>>>>>>>\n'
a_ : int = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
a_ : int = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@staticmethod
def __A ( A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self , A , A , *A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = get_logger('''datasets-cli/converting''' )
__magic_name__ = tfds_path
__magic_name__ = datasets_directory
def __A ( self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__magic_name__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__magic_name__ = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__magic_name__ = os.path.abspath(self._datasets_directory )
self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__magic_name__ = []
__magic_name__ = []
__magic_name__ = {}
if os.path.isdir(self._tfds_path ):
__magic_name__ = os.listdir(_SCREAMING_SNAKE_CASE )
else:
__magic_name__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'Looking at file {f_name}' )
__magic_name__ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__magic_name__ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not os.path.isfile(_SCREAMING_SNAKE_CASE ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
__magic_name__ = f.readlines()
__magic_name__ = []
__magic_name__ = False
__magic_name__ = False
__magic_name__ = []
for line in lines:
__magic_name__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__magic_name__ = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__magic_name__ = ''''''
continue
elif "from absl import logging" in out_line:
__magic_name__ = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__magic_name__ = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__magic_name__ = True
__magic_name__ = list(filter(lambda A : e in out_line , _SCREAMING_SNAKE_CASE ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_SCREAMING_SNAKE_CASE ) + '''\n''' )
out_lines.append(_SCREAMING_SNAKE_CASE )
out_lines.append(_SCREAMING_SNAKE_CASE )
continue
else:
for pattern, replacement in TO_CONVERT:
__magic_name__ = re.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__magic_name__ = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _SCREAMING_SNAKE_CASE )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__magic_name__ = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__magic_name__ = True
out_lines.append(_SCREAMING_SNAKE_CASE )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__magic_name__ = f_name.replace('''.py''' , '''''' )
__magic_name__ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__magic_name__ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
self._logger.info(F'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_SCREAMING_SNAKE_CASE )
if needs_manual_update:
with_manual_update.append(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_SCREAMING_SNAKE_CASE )
self._logger.info(F'Converted in {output_file}' )
for utils_file in utils_files:
try:
__magic_name__ = os.path.basename(_SCREAMING_SNAKE_CASE )
__magic_name__ = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F'Moving {dest_folder} to {utils_file}' )
shutil.copy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except KeyError:
self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' ) | 713 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a_ : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
"""simple docstring"""
_a = ['pixel_values']
def __init__( self , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = 1 / 2_55 , A = True , A = None , A = True , **A , ) -> None:
'''simple docstring'''
super().__init__(**A )
__magic_name__ = size if size is not None else {"""shortest_edge""": 2_24}
__magic_name__ = get_size_dict(A , default_to_square=A )
__magic_name__ = crop_size if crop_size is not None else {"""height""": 2_56, """width""": 2_56}
__magic_name__ = get_size_dict(A , param_name='''crop_size''' )
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = resample
__magic_name__ = do_rescale
__magic_name__ = rescale_factor
__magic_name__ = do_center_crop
__magic_name__ = crop_size
__magic_name__ = do_flip_channel_order
def __A ( self , A , A , A = PIL.Image.BILINEAR , A = None , **A , ) -> np.ndarray:
'''simple docstring'''
__magic_name__ = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
__magic_name__ = get_resize_output_image_size(A , size=size['''shortest_edge'''] , default_to_square=A )
return resize(A , size=A , resample=A , data_format=A , **A )
def __A ( self , A , A , A = None , **A , ) -> np.ndarray:
'''simple docstring'''
__magic_name__ = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A , size=(size['''height'''], size['''width''']) , data_format=A , **A )
def __A ( self , A , A , A = None , **A , ) -> Optional[Any]:
'''simple docstring'''
return rescale(A , scale=A , data_format=A , **A )
def __A ( self , A , A = None ) -> np.ndarray:
'''simple docstring'''
return flip_channel_order(A , data_format=A )
def __A ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
'''simple docstring'''
__magic_name__ = do_resize if do_resize is not None else self.do_resize
__magic_name__ = resample if resample is not None else self.resample
__magic_name__ = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop
__magic_name__ = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__magic_name__ = size if size is not None else self.size
__magic_name__ = get_size_dict(A , default_to_square=A )
__magic_name__ = crop_size if crop_size is not None else self.crop_size
__magic_name__ = get_size_dict(A , param_name='''crop_size''' )
__magic_name__ = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
__magic_name__ = [to_numpy_array(A ) for image in images]
if do_resize:
__magic_name__ = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
__magic_name__ = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
__magic_name__ = [self.rescale(image=A , scale=A ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__magic_name__ = [self.flip_channel_order(image=A ) for image in images]
__magic_name__ = [to_channel_dimension_format(A , A ) for image in images]
__magic_name__ = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
def __A ( self , A , A = None ) -> List[str]:
'''simple docstring'''
__magic_name__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A ) != len(A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A ):
__magic_name__ = target_sizes.numpy()
__magic_name__ = []
for idx in range(len(A ) ):
__magic_name__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A )
__magic_name__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A )
else:
__magic_name__ = logits.argmax(dim=1 )
__magic_name__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 714 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE_ ( lowercase__ ):
"""simple docstring"""
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__lowercase , '''num_attention_heads''' ) )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=64 , A=3 , A=3 , A=2 , A=1 , A=16 , A=[1_28, 2_56, 3_84] , A=[4, 6, 8] , A=[2, 3, 4] , A=[16, 16, 16] , A=0 , A=[2, 2, 2] , A=[2, 2, 2] , A=0.02 , A=True , A=True , A=2 , ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = num_channels
__magic_name__ = kernel_size
__magic_name__ = stride
__magic_name__ = padding
__magic_name__ = hidden_sizes
__magic_name__ = num_attention_heads
__magic_name__ = depths
__magic_name__ = key_dim
__magic_name__ = drop_path_rate
__magic_name__ = patch_size
__magic_name__ = attention_ratio
__magic_name__ = mlp_ratio
__magic_name__ = initializer_range
__magic_name__ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
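        # Assumed reading of the down_ops layout (per the LeViT convention in
        # transformers): each entry is [op, key_dim, num_heads, attn_ratio,
        # mlp_ratio, stride], so hidden_sizes[i] // key_dim[0] sets the head
        # count of the shrink-attention stage between stages i and i + 1.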
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = num_labels
__magic_name__ = initializer_range
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> str:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __A ( self , A , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = LevitModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__magic_name__ = model(__lowercase )
__magic_name__ = (self.image_size, self.image_size)
__magic_name__ , __magic_name__ = image_size[0], image_size[1]
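        # Each of the 4 stem convolutions halves the spatial size via the usual
        # conv arithmetic floor((n + 2 * padding - kernel_size) / stride) + 1.
        # With the defaults above (64x64 input, kernel 3, stride 2, padding 1)
        # this gives 64 -> 32 -> 16 -> 8 -> 4, so the expected sequence length
        # below is ceil(4 / 4) * ceil(4 / 4) = 1 token of width hidden_sizes[-1].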
for _ in range(4 ):
__magic_name__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__magic_name__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = LevitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__magic_name__ = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_a = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = LevitModelTester(self )
__magic_name__ = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __A ( self ) -> Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ) -> Dict:
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def __A ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def __A ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def __A ( self ) -> Tuple:
'''simple docstring'''
pass
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(__lowercase )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __A ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(A , A , A ):
__magic_name__ = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__magic_name__ = outputs.hidden_states
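            # presumably the patch-embedding output plus one hidden state per
            # stage: len(depths) + 1 = 4 with the default depths [2, 3, 4]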
__magic_name__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowercase ) , __lowercase )
__magic_name__ = (self.model_tester.image_size, self.model_tester.image_size)
__magic_name__ , __magic_name__ = image_size[0], image_size[1]
for _ in range(4 ):
__magic_name__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__magic_name__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
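            # Numeric sketch (illustrative addition, assuming the tester's default
            # geometry of image_size=64, kernel_size=3, stride=2, padding=1): each
            # of the four reductions computes floor((s + 2 * 1 - 3) / 2) + 1, so the
            # spatial size goes 64 -> 32 -> 16 -> 8 -> 4 and the first hidden state
            # has shape (4 * 4, hidden_sizes[0]).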
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __A ( self , A , A , A=False ) -> Any:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowercase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__magic_name__ = model_class(__lowercase )
model.to(__lowercase )
model.train()
__magic_name__ = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__magic_name__ = model(**__lowercase ).loss
loss.backward()
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__magic_name__ = False
__magic_name__ = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowercase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__magic_name__ = model_class(__lowercase )
model.gradient_checkpointing_enable()
model.to(__lowercase )
model.train()
__magic_name__ = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__magic_name__ = model(**__lowercase ).loss
loss.backward()
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowercase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
__magic_name__ = problem_type['''title''']
__magic_name__ = problem_type['''num_labels''']
__magic_name__ = model_class(__lowercase )
model.to(__lowercase )
model.train()
__magic_name__ = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if problem_type["num_labels"] > 1:
__magic_name__ = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
__magic_name__ = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowercase ) as warning_list:
__magic_name__ = model(**__lowercase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = LevitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self ) -> Dict:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowercase )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__magic_name__ = model(**__lowercase )
# verify the logits
__magic_name__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
__magic_name__ = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
    # Round the real cube root to the nearest integer so exact cubes are
    # detected without relying on floating-point equality.
    __magic_name__ = round(snake_case_ ** (1 / 3) )
    return __magic_name__ * __magic_name__ * __magic_name__ == snake_case_
if __name__ == "__main__":
print(perfect_cube(27))
    print(perfect_cube(4))
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return sum((va - va) ** 2 for va, va in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _SCREAMING_SNAKE_CASE ( snake_case_ : bytes , snake_case_ : int ):
__magic_name__ = f'{sampling_rate}'
__magic_name__ = '''1'''
__magic_name__ = '''f32le'''
__magic_name__ = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(snake_case_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__magic_name__ = ffmpeg_process.communicate(snake_case_ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
__magic_name__ = output_stream[0]
__magic_name__ = np.frombuffer(snake_case_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : float , snake_case_ : str = "f32le" , ):
__magic_name__ = f'{sampling_rate}'
__magic_name__ = '''1'''
if format_for_conversion == "s16le":
__magic_name__ = 2
elif format_for_conversion == "f32le":
__magic_name__ = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
__magic_name__ = platform.system()
if system == "Linux":
__magic_name__ = '''alsa'''
__magic_name__ = '''default'''
elif system == "Darwin":
__magic_name__ = '''avfoundation'''
__magic_name__ = ''':0'''
elif system == "Windows":
__magic_name__ = '''dshow'''
__magic_name__ = '''default'''
__magic_name__ = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__magic_name__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__magic_name__ = _ffmpeg_stream(snake_case_ , snake_case_ )
for item in iterator:
yield item
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : float , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[Tuple[float, float], float]] = None , snake_case_ : str = "f32le" , ):
if stream_chunk_s is not None:
__magic_name__ = stream_chunk_s
else:
__magic_name__ = chunk_length_s
__magic_name__ = ffmpeg_microphone(snake_case_ , snake_case_ , format_for_conversion=snake_case_ )
if format_for_conversion == "s16le":
__magic_name__ = np.intaa
__magic_name__ = 2
elif format_for_conversion == "f32le":
__magic_name__ = np.floataa
__magic_name__ = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
__magic_name__ = chunk_length_s / 6
__magic_name__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(snake_case_ , (int, float) ):
__magic_name__ = [stride_length_s, stride_length_s]
__magic_name__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__magic_name__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__magic_name__ = datetime.datetime.now()
__magic_name__ = datetime.timedelta(seconds=snake_case_ )
for item in chunk_bytes_iter(snake_case_ , snake_case_ , stride=(stride_left, stride_right) , stream=snake_case_ ):
# Put everything back in numpy scale
__magic_name__ = np.frombuffer(item['''raw'''] , dtype=snake_case_ )
__magic_name__ = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__magic_name__ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : Tuple[int, int] , snake_case_ : bool = False ):
__magic_name__ = B''''''
__magic_name__ , __magic_name__ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
__magic_name__ = 0
for raw in iterator:
acc += raw
if stream and len(snake_case_ ) < chunk_len:
__magic_name__ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(snake_case_ ) >= chunk_len:
# We are flushing the accumulator
__magic_name__ = (_stride_left, stride_right)
__magic_name__ = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__magic_name__ = False
yield item
__magic_name__ = stride_left
__magic_name__ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(snake_case_ ) > stride_left:
__magic_name__ = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__magic_name__ = False
yield item
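# Worked example (illustrative addition, not in the original module): with
# chunk_len=6 and stride=(2, 2), iterating over a single b"0123456789" yields
#     {"raw": b"012345", "stride": (0, 2)}  # first chunk has no left stride
#     {"raw": b"234567", "stride": (2, 2)}
#     {"raw": b"456789", "stride": (2, 2)}
#     {"raw": b"6789", "stride": (2, 0)}  # remainder flushed after the loop
# so every byte is covered exactly once after the strides are trimmed.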
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : int ):
__magic_name__ = 2**24 # 16Mo
try:
with subprocess.Popen(snake_case_ , stdout=subprocess.PIPE , bufsize=snake_case_ ) as ffmpeg_process:
while True:
__magic_name__ = ffmpeg_process.stdout.read(snake_case_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
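# Usage sketch (an illustrative addition; `ffmpeg_read` is the bytes decoder
# defined at the top of this module under its original name, and "sample.flac"
# is a hypothetical local file readable by ffmpeg):
if __name__ == "__main__":
    with open("sample.flac", "rb") as f:
        waveform = ffmpeg_read(f.read(), sampling_rate=16_000)
    print(waveform.shape, waveform.dtype)  # 1-D float32 array at 16 kHz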
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
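# Example of the renaming above (illustrative): a codebook key such as
# "encoders.0.level_blocks.0.k" ends with "k" and becomes
# "encoders.0.level_blocks.0.codebook", while keys starting with "y_emb."
# are moved under "metadata_embedding.".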
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            __magic_name__ = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
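    # Ordering note (illustrative): the checkpoints load as vqvae, prior_level_0,
    # prior_level_1, prior_level_2, whose key prefixes are "vqvae", "priors.2",
    # "priors.1" and "priors.0", hence the reversed weight_dict[2 - i] lookup above.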
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import argparse
import os
import re
import packaging.version
a_ : Dict = 'examples/'
a_ : Tuple = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
a_ : List[str] = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
a_ : int = 'README.md'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Dict ):
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__magic_name__ = f.read()
__magic_name__ = REPLACE_PATTERNS[pattern]
__magic_name__ = replace.replace('''VERSION''' , __lowerCAmelCase )
__magic_name__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__lowerCAmelCase )
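# Illustrative call (an addition for clarity; the version string is hypothetical):
# update_version_in_file("src/transformers/__init__.py", "4.2.0", pattern="init")
# rewrites the `__version__ = "..."` line of that file in place to the new value.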
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern='''examples''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = "🤗 Transformers currently provides the following architectures"
__magic_name__ = "1. Want to contribute a new model?"
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__magic_name__ = f.readlines()
# Find the start of the list.
__magic_name__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__magic_name__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__magic_name__ = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__magic_name__ = f.read()
__magic_name__ = REPLACE_PATTERNS["init"][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any]=False ):
__magic_name__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__magic_name__ = default_version.base_version
elif patch:
__magic_name__ = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__magic_name__ = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__magic_name__ = input(f'Which version are you releasing? [{default_version}]' )
if len(__lowerCAmelCase ) == 0:
__magic_name__ = default_version
print(f'Updating version to {version}.' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = get_version()
__magic_name__ = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
__magic_name__ = current_version.base_version
# Check with the user we got that right.
__magic_name__ = input(f'Which version are we developing now? [{dev_version}]' )
if len(__lowerCAmelCase ) == 0:
__magic_name__ = dev_version
print(f'Updating version to {version}.' )
global_version_update(__lowerCAmelCase )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
a_ : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
        post_release_work()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
        return 12
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
__magic_name__ = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__magic_name__ = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__magic_name__ = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
a_ : Any = None
a_ : List[Any] = logging.get_logger(__name__)
a_ : int = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ : int = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
a_ : Optional[Any] = {
'google/rembert': 256,
}
a_ : Tuple = '▁'
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = RemBertTokenizer
def __init__( self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , )
__magic_name__ = do_lower_case
__magic_name__ = remove_space
__magic_name__ = keep_accents
__magic_name__ = vocab_file
__magic_name__ = False if not self.vocab_file else True
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , A , A = None , A = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1]
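        # Illustrative: for a single 3-token sequence this returns
        # [1, 0, 0, 0, 1], where 1 marks the added [CLS]/[SEP] positions.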
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__a ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__a ) )
return
__magic_name__ = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
        return (out_vocab_file,)
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = '''xvjiarui/stable-diffusion-2-inpainting'''
__magic_name__ , __magic_name__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = jax.random.PRNGKey(0 )
__magic_name__ = 50
__magic_name__ = jax.device_count()
__magic_name__ = num_samples * [prompt]
__magic_name__ = num_samples * [init_image]
__magic_name__ = num_samples * [mask_image]
__magic_name__ , __magic_name__ , __magic_name__ = pipeline.prepare_inputs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# shard inputs and rng
__magic_name__ = replicate(UpperCAmelCase__ )
__magic_name__ = jax.random.split(UpperCAmelCase__ , jax.device_count() )
__magic_name__ = shard(UpperCAmelCase__ )
__magic_name__ = shard(UpperCAmelCase__ )
__magic_name__ = shard(UpperCAmelCase__ )
__magic_name__ = pipeline(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ )
__magic_name__ = output.images.reshape(UpperCAmelCase__ , 5_12 , 5_12 , 3 )
__magic_name__ = images[0, 2_53:2_56, 2_53:2_56, -1]
__magic_name__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__magic_name__ = jnp.array(
[0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
__magic_name__ = len(snake_case_ )
print('''The following activities are selected:''' )
# The first activity is always selected
__magic_name__ = 0
print(snake_case_ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_ , end=''',''' )
__magic_name__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = [1, 3, 0, 5, 8, 5]
a_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
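# Worked trace for the inputs above (illustrative): activity 0 is always taken
# (finish=2); start[1]=3 >= 2, take 1 (finish=4); start[2]=0 < 4, skip;
# start[3]=5 >= 4, take 3 (finish=7); start[4]=8 >= 7, take 4; start[5]=5 < 9,
# skip. Printed selection: 0,1,3,4, (the greedy step assumes activities are
# sorted by finish time, as they are here).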
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
_a = """codegen"""
_a = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , A=5_04_00 , A=20_48 , A=20_48 , A=40_96 , A=28 , A=16 , A=64 , A=None , A="gelu_new" , A=0.0 , A=0.0 , A=0.0 , A=1E-5 , A=0.02 , A=True , A=5_02_56 , A=5_02_56 , A=False , **A , ) -> List[Any]:
'''simple docstring'''
__magic_name__ = vocab_size
__magic_name__ = n_ctx
__magic_name__ = n_positions
__magic_name__ = n_embd
__magic_name__ = n_layer
__magic_name__ = n_head
__magic_name__ = n_inner
__magic_name__ = rotary_dim
__magic_name__ = activation_function
__magic_name__ = resid_pdrop
__magic_name__ = embd_pdrop
__magic_name__ = attn_pdrop
__magic_name__ = layer_norm_epsilon
__magic_name__ = initializer_range
__magic_name__ = use_cache
__magic_name__ = bos_token_id
__magic_name__ = eos_token_id
super().__init__(
bos_token_id=A__ , eos_token_id=A__ , tie_word_embeddings=A__ , **A__ )
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , A , A = "default" , A = None , A = False , ) -> str:
'''simple docstring'''
super().__init__(A__ , task=A__ , patching_specs=A__ , use_past=A__ )
if not getattr(self._config , '''pad_token_id''' , A__ ):
# TODO: how to do that better?
__magic_name__ = 0
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__magic_name__ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(A__ , direction='''inputs''' )
__magic_name__ = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__magic_name__ = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __A ( self ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def __A ( self ) -> int:
'''simple docstring'''
return self._config.n_head
def __A ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__magic_name__ = super(A__ , self ).generate_dummy_inputs(
A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
# We need to order the input in the way they appears in the forward()
__magic_name__ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__magic_name__ , __magic_name__ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__magic_name__ = seqlen + 2
__magic_name__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__magic_name__ = [
(torch.zeros(A__ ), torch.zeros(A__ )) for _ in range(self.num_layers )
]
__magic_name__ = common_inputs['''attention_mask''']
if self.use_past:
__magic_name__ = ordered_inputs['''attention_mask'''].dtype
__magic_name__ = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(A__ , A__ , dtype=A__ )] , dim=1 )
return ordered_inputs
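        # Shape sketch (illustrative, assuming batch=2 and seq_length=5 with
        # use_past=True): each past key/value tensor built above has shape
        # (2, n_head, 5 + 2, n_embd // n_head), and the attention mask is
        # extended along dim=1 to length 5 + 7 = 12.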
@property
def __A ( self ) -> int:
'''simple docstring'''
return 13
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 0 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(_SCREAMING_SNAKE_CASE ) == 1:
return True
__magic_name__ = series[1] - series[0]
for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''Input list must be a non empty list''' )
__magic_name__ = 0
for val in series:
answer += val
return answer / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod() | 701 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
from math import sqrt
def _SCREAMING_SNAKE_CASE ( snake_case_ : int = 100_0000 ):
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowercase__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""") | 702 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
if not is_accelerate_available():
return method
__magic_name__ = version.parse(accelerate.__version__ ).base_version
if version.parse(lowerCAmelCase_ ) < version.parse('''0.17.0''' ):
return method
def wrapper(self : List[str] , *snake_case_ : str , **snake_case_ : List[Any] ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *lowerCAmelCase_ , **lowerCAmelCase_ )
return wrapper | 703 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a_ = False
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
image=lowercase__ , generator=lowercase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__magic_name__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 704 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Tuple = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_a = """camembert"""
def __init__( self , A=3_05_22 , A=7_68 , A=12 , A=12 , A=30_72 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=2 , A=0.02 , A=1E-12 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , **A , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = hidden_act
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = position_embedding_type
__magic_name__ = use_cache
__magic_name__ = classifier_dropout
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@property
def __A ( self ) -> Dict:
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__magic_name__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 705 |
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 0 |
import math
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE ( snake_case_ : Callable[[float], float] , snake_case_ : float , snake_case_ : float ):
__magic_name__ = xa
__magic_name__ = xa
while True:
if x_n == x_na or function(snake_case_ ) == function(snake_case_ ):
raise ZeroDivisionError('''float division by zero, could not find root''' )
__magic_name__ = x_na - (
function(snake_case_ ) / ((function(snake_case_ ) - function(snake_case_ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
__magic_name__ = x_na
__magic_name__ = x_na
def _SCREAMING_SNAKE_CASE ( snake_case_ : float ):
return math.pow(snake_case_ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 706 |
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 0 |
from manim import *
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = Rectangle(height=0.5 , width=0.5 )
__magic_name__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__magic_name__ = [mem.copy() for i in range(6 )]
__magic_name__ = [mem.copy() for i in range(6 )]
__magic_name__ = VGroup(*__a ).arrange(__a , buff=0 )
__magic_name__ = VGroup(*__a ).arrange(__a , buff=0 )
__magic_name__ = VGroup(__a , __a ).arrange(__a , buff=0 )
__magic_name__ = Text('''CPU''' , font_size=24 )
__magic_name__ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
__magic_name__ = [mem.copy() for i in range(4 )]
__magic_name__ = VGroup(*__a ).arrange(__a , buff=0 )
__magic_name__ = Text('''GPU''' , font_size=24 )
__magic_name__ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
__magic_name__ = [mem.copy() for i in range(6 )]
__magic_name__ = VGroup(*__a ).arrange(__a , buff=0 )
__magic_name__ = Text('''Model''' , font_size=24 )
__magic_name__ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
__magic_name__ = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__magic_name__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
__magic_name__ = [mem.copy() for i in range(6 )]
__magic_name__ = VGroup(*__a ).arrange(__a , buff=0 )
__magic_name__ = Text('''Loaded Checkpoint''' , font_size=24 )
__magic_name__ = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__magic_name__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__magic_name__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
__magic_name__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__magic_name__ = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
__magic_name__ = []
__magic_name__ = []
for i, rect in enumerate(__a ):
__magic_name__ = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
__magic_name__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait() | 707 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Optional[int] ):
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Optional[Any] ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Any ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Any , snake_case_ : Tuple ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__magic_name__ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__magic_name__ = features.copy()
__magic_name__ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Any ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] ):
if issubclass(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = jsonl_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = [jsonl_path]
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any]=("train",) ):
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
__magic_name__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int , snake_case_ : Optional[Any] ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Dict ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = JsonDatasetReader({'''train''': jsonl_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ):
if split:
__magic_name__ = {split: jsonl_path}
else:
__magic_name__ = '''train'''
__magic_name__ = {'''train''': jsonl_path, '''test''': jsonl_path}
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return json.load(__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
return [json.loads(__UpperCamelCase ) for line in buffer]
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case ).write()
buffer.seek(0 )
__magic_name__ = load_json_function(_snake_case )
assert isinstance(_snake_case , _snake_case )
assert isinstance(exported_content[0] , _snake_case )
assert len(_snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def __A ( self , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case , orient=_snake_case ).write()
buffer.seek(0 )
__magic_name__ = load_json(_snake_case )
assert isinstance(_snake_case , _snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_snake_case ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def __A ( self , A , A , A ) -> Optional[Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case , num_proc=2 ).write()
buffer.seek(0 )
__magic_name__ = load_json_function(_snake_case )
assert isinstance(_snake_case , _snake_case )
assert isinstance(exported_content[0] , _snake_case )
assert len(_snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def __A ( self , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case , orient=_snake_case , num_proc=2 ).write()
buffer.seek(0 )
__magic_name__ = load_json(_snake_case )
assert isinstance(_snake_case , _snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_snake_case ) == 10
def __A ( self , A ) -> Any:
'''simple docstring'''
with pytest.raises(_snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_snake_case , _snake_case , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def __A ( self , A , A , A , A , A ) -> int:
'''simple docstring'''
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / F'test.json.{extension}'
__magic_name__ = str(shared_datadir / F'test_file.json.{extension}' )
JsonDatasetWriter(_snake_case , _snake_case , compression=_snake_case ).write()
with fsspec.open(_snake_case , '''rb''' , compression='''infer''' ) as f:
__magic_name__ = f.read()
with fsspec.open(_snake_case , '''rb''' , compression='''infer''' ) as f:
__magic_name__ = f.read()
assert exported_content == original_content | 708 |
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 0 |
import math
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Optional[Any] ):
__magic_name__ = len(snake_case_ )
__magic_name__ = int(math.floor(math.sqrt(snake_case_ ) ) )
__magic_name__ = 0
while arr[min(snake_case_ , snake_case_ ) - 1] < x:
__magic_name__ = step
step += int(math.floor(math.sqrt(snake_case_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__magic_name__ = prev + 1
if prev == min(snake_case_ , snake_case_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
a_ : List[str] = input('Enter numbers separated by a comma:\n').strip()
a_ : List[str] = [int(item) for item in user_input.split(',')]
a_ : Dict = int(input('Enter the number to be searched:\n'))
a_ : List[Any] = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""") | 709 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 | 0 |
import math
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] = 1_0001 ):
try:
__magic_name__ = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
__magic_name__ = []
__magic_name__ = 2
while len(snake_case_ ) < nth:
if is_prime(snake_case_ ):
primes.append(snake_case_ )
num += 1
else:
num += 1
return primes[len(snake_case_ ) - 1]
if __name__ == "__main__":
print(F"""{solution() = }""") | 710 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
a_ : str = logging.getLogger()
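# Helper used by the test harness: parse a ``-f`` command-line flag and return its value.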
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
__magic_name__ = parser.parse_args()
return args.f
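# Load the metrics that each example script writes to ``<output_dir>/all_results.json``.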
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
__magic_name__ = {}
    __magic_name__ = os.path.join(snake_case_ , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            __magic_name__ = json.load(f )
else:
raise ValueError(f'can\'t find {path}' )
return results
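# Gate for the mixed-precision tests: requires both a CUDA device and NVIDIA apex.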
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
a_ : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
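# End-to-end checks: each test launches one of the PyTorch no-trainer example
# scripts through ``accelerate launch`` on tiny fixture datasets, then asserts
# on the metrics and checkpoint folders the run leaves behind.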
class SCREAMING_SNAKE_CASE_ ( TestCasePlus ):
"""simple docstring"""
@classmethod
def __A ( cls ) -> List[Any]:
'''simple docstring'''
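        # Write one default accelerate config up front; every test launches through it.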
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__magic_name__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __A ( cls ) -> Dict:
'''simple docstring'''
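        # Drop the shared config directory created in setUpClass.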
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        __magic_name__ = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
self.assertLess(result['''perplexity'''] , 1_00 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = 7 if get_gpu_count() > 1 else 2
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''translation_no_trainer''' ) ) )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase__ )
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
__magic_name__ = get_results(UpperCamelCase__ )
        # The untrained base model scores around 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , '''image_classification_no_trainer''' ) ) ) | 711 |
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
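# The indices above select d_1, d_10, d_100, ..., d_1000000 of Champernowne's
# constant 0.123456789101112...; their product is Project Euler problem 40.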
if __name__ == "__main__":
print(solution()) | 678 | 0 |
from typing import Any
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = data
__magic_name__ = None
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
__magic_name__ = None
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.head
while temp is not None:
print(temp.data , end=''' ''' )
__magic_name__ = temp.next
print()
def __A ( self , A ) -> Any:
'''simple docstring'''
__magic_name__ = Node(_lowerCamelCase )
__magic_name__ = self.head
__magic_name__ = new_node
def __A ( self , A , A ) -> Tuple:
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
__magic_name__ = self.head
while node_a is not None and node_a.data != node_data_a:
__magic_name__ = node_a.next
__magic_name__ = self.head
while node_a is not None and node_a.data != node_data_a:
__magic_name__ = node_a.next
if node_a is None or node_a is None:
return
__magic_name__ , __magic_name__ = node_a.data, node_a.data
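            # Swapping the data payloads (rather than re-linking the nodes) keeps
            # the list structure untouched; with the two linear scans above, the
            # whole operation is O(n) in the list length.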
if __name__ == "__main__":
a_ : Union[str, Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list() | 712 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
            assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__magic_name__ = v
return r
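# The loader above unpickles a checkpoint, keeps only its "model" dict, and
# promotes every numpy array in it to a torch.Tensor so the weights can be
# loaded into PyTorch modules downstream.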
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
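# Resumption works via the `Range: bytes=<resume_size>-` request header above; a
# 416 ("Range Not Satisfiable") response means the file is already complete, so
# the early return simply skips the download.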
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''' % (snake_case_ , temp_file.name) )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
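# Cache entries are therefore named sha256(url)[.sha256(etag)][.h5]; a changed
# ETag on the server yields a different filename, which avoids stale cache hits
# without ever deleting older entries.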
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
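# Caution: `eval` on file or response contents is convenient for the trusted
# demo data this utility targets, but it executes arbitrary code; json.loads
# would be the safe choice for untrusted inputs.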
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : int = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
__magic_name__ = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
__magic_name__ = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__magic_name__ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
__magic_name__ = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(__snake_case )-1}' )
if "norm" in key:
__magic_name__ = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__magic_name__ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
__magic_name__ = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(__snake_case )-1}' )
if "layer_norm1" in key:
__magic_name__ = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
__magic_name__ = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
__magic_name__ = key[key.find('''block''' ) + len('''block''' )]
__magic_name__ = key.replace(f'block{idx}' , f'block.{int(__snake_case )-1}' )
if "attn.q" in key:
__magic_name__ = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
__magic_name__ = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
__magic_name__ = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
__magic_name__ = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
__magic_name__ = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
__magic_name__ = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
__magic_name__ = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
__magic_name__ = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__magic_name__ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
__magic_name__ = key.replace(f'linear_c{idx}' , f'linear_c.{int(__snake_case )-1}' )
if "bot_conv" in key:
__magic_name__ = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
__magic_name__ = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
__magic_name__ = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
__magic_name__ = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
__magic_name__ = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
__magic_name__ = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
__magic_name__ = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
__magic_name__ = key.replace('''module.last_layer_depth''' , '''head.head''' )
__magic_name__ = value
return new_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
__magic_name__ = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
__magic_name__ = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
__magic_name__ = kv_weight[
: config.hidden_sizes[i], :
]
__magic_name__ = kv_bias[: config.hidden_sizes[i]]
__magic_name__ = kv_weight[
config.hidden_sizes[i] :, :
]
__magic_name__ = kv_bias[config.hidden_sizes[i] :]
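# After the split above, the first `hidden_sizes[i]` rows of each fused kv matrix
# become the key projection and the remaining rows the value projection, matching
# the separate key/value linears expected on the HuggingFace side.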
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
__magic_name__ = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return image
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict=False , snake_case_ : Tuple=None ):
__magic_name__ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
__magic_name__ = GLPNImageProcessor()
# prepare image
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=__snake_case , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
__magic_name__ = torch.load(__snake_case , map_location=torch.device('''cpu''' ) )
# rename keys
__magic_name__ = rename_keys(__snake_case )
# key and value matrices need special treatment
read_in_k_v(__snake_case , __snake_case )
# create HuggingFace model and load state dict
__magic_name__ = GLPNForDepthEstimation(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# forward pass
__magic_name__ = model(__snake_case )
__magic_name__ = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
__magic_name__ = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
__magic_name__ = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f'Unknown model name: {model_name}' )
__magic_name__ = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , __snake_case , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(__snake_case , __snake_case ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__snake_case , )
image_processor.push_to_hub(
repo_path_or_name=Path(__snake_case , __snake_case ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__snake_case , )
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
a_ : str = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name) | 713 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
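# Note: in a distributed run the sampler may duplicate samples to fill the final
# batch on every process; the slicing inside the loop above removes those
# duplicates before they reach the metric.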
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
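    # One optimizer step is taken every `gradient_accumulation_steps` batches, so
    # the total above is expressed in optimizer steps rather than in batches.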
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and state resumption.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 0 |
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
"""simple docstring"""
pass
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
"""simple docstring"""
pass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = [
[],
[],
[],
]
def __A ( self , A , A ) -> None:
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 1_00:
                raise OverFlowError('''Maximum queue size is 100''' )
self.queues[priority].append(__A )
except IndexError:
raise ValueError('''Valid priorities are 0, 1, and 2''' )
def __A ( self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('''All queues are empty''' )
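    # The queues are scanned in index order, so priority 0 is always served
    # before priority 1, and 1 before 2; FIFO order is kept within a level.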
def __str__( self ) -> str:
'''simple docstring'''
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
__magic_name__ = []
def __A ( self , A ) -> None:
'''simple docstring'''
if len(self.queue ) == 1_00:
raise OverFlowError('''Maximum queue size is 100''' )
self.queue.append(__A )
def __A ( self ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError('''The queue is empty''' )
else:
__magic_name__ = min(self.queue )
self.queue.remove(__A )
return data
def __str__( self ) -> str:
'''simple docstring'''
return str(self.queue )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(a__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(a__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(a__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(a__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue() | 714 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
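# Only words longer than four characters are reversed, so for example
# "Hey wollef sroirraw" becomes "Hey fellow warriors".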
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ):
"""simple docstring"""
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = 8
# DPR tok
__magic_name__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__A , exist_ok=__A )
__magic_name__ = os.path.join(__A , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__magic_name__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__magic_name__ = dict(zip(__A , range(len(__A ) ) ) )
__magic_name__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__magic_name__ = {'''unk_token''': '''<unk>'''}
__magic_name__ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__A , exist_ok=__A )
__magic_name__ = os.path.join(__A , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__magic_name__ = os.path.join(__A , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__A ) )
def __A ( self ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __A ( self ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __A ( self ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
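    # With these embeddings an all-ones query has its largest inner product with
    # doc "1" (the all-twos vector) and an all-minus-ones query with doc "0";
    # the retrieval assertions in the tests below rely on exactly that.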
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.get_dummy_dataset()
__magic_name__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__magic_name__ = dataset
__magic_name__ = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.get_dummy_dataset()
__magic_name__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__magic_name__ = os.path.join(self.tmpdirname , '''dataset''' )
__magic_name__ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__magic_name__ = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__magic_name__ = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
return retriever
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__magic_name__ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__magic_name__ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__magic_name__ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__A , open(__A , '''wb''' ) )
__magic_name__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__magic_name__ = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = self.get_dummy_canonical_hf_index_retriever()
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ , __magic_name__ , __magic_name__ = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__magic_name__ = self.get_dummy_dataset()
retriever.save_pretrained(__A )
__magic_name__ = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ , __magic_name__ , __magic_name__ = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__magic_name__ = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ , __magic_name__ , __magic_name__ = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__magic_name__ = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = self.get_dummy_legacy_index_retriever()
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ , __magic_name__ , __magic_name__ = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __A )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__magic_name__ = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __A ( self ) -> Tuple:
'''simple docstring'''
import torch
__magic_name__ = 1
__magic_name__ = self.get_dummy_canonical_hf_index_retriever()
__magic_name__ = [[5, 7], [10, 11]]
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
__magic_name__ , __magic_name__ , __magic_name__ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
__magic_name__ = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='''pt''' , )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.get_dpr_ctx_encoder_tokenizer()
__magic_name__ = 1
__magic_name__ = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
__magic_name__ = [[5, 7], [10, 11]]
__magic_name__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__magic_name__ = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __A ) # check for doc token related keys in dictionary.
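    # Usage sketch for the retrieve() API exercised above (illustrative only;
    # `retriever` would come from one of the get_dummy_* helpers defined earlier,
    # and retrieval_vector_size matches the test fixture):
    #     hidden_states = np.array(
    #         [np.ones(retrieval_vector_size), -np.ones(retrieval_vector_size)], dtype=np.float32
    #     )
    #     retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=1)
    #     retrieved_doc_embeds.shape  # (batch_size, n_docs, retrieval_vector_size)
    #     doc_ids.tolist()            # e.g. [[1], [0]] -- best inner-product match per query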
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
    snake_case_ = re.sub('''<n>''' , '''''' , snake_case_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case_ ) )
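# Example (a quick sketch, assuming nltk's punkt data was fetched above):
#     _SCREAMING_SNAKE_CASE("Hello there. General Kenobi.")
#     -> "Hello there.\nGeneral Kenobi."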
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance ( vector_a : Vector , vector_b : Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def euclidean_distance_no_np ( vector_a : Vector , vector_b : Vector ) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark ( ) -> None:
        from timeit import timeit
        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
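# Worked example for the functions above:
#     euclidean_distance([0, 0, 0], [3, 4, 0])
#     = sqrt(3**2 + 4**2 + 0**2) = sqrt(25) = 5.0
# euclidean_distance_no_np returns the same value without numpy.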
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution ( ):
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , '''words.txt''' )
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
    print(solution())
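# Worked example of the letter-value test used in solution():
#     "SKY" -> (ord("S") - 64) + (ord("K") - 64) + (ord("Y") - 64) = 19 + 11 + 25 = 55
#     55 = 10 * 11 / 2 is the 10th triangular number, so "SKY" would be counted.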
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
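# Self-contained sketch of the regex-driven key renaming used above (toy
# pattern and key for illustration; these are not real Jukebox checkpoint keys):
import re as _re
_pattern = _re.compile(r"encoders\.(\d+)\.model\.(\d+)\.(bias|weight)")
_groups = _pattern.fullmatch("encoders.0.model.3.weight").groups()  # ('0', '3', 'weight')
_new_key = f"encoders.{_groups[0]}.proj.{_groups[2]}"
assert _new_key == "encoders.0.proj.weight"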
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args ( ):
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
def main ( ):
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    main()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
        return 12
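# Sketch of what the attribute_map above provides (assuming transformers'
# PretrainedConfig semantics; the config class name here is hypothetical):
#     config = TableTransformerConfig()
#     config.hidden_size == config.d_model                           # -> True
#     config.num_attention_heads == config.encoder_attention_heads  # -> True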
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key ( k : str ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''' , '''.self_attn''' )
        k = k.replace('''norm1''' , '''self_attn_layer_norm''' )
        k = k.replace('''norm2''' , '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''' , '''self_attn_layer_norm''' )
        k = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''' , '''final_layer_norm''' )
    return k
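# Worked example for rename_state_dict_key above:
#     "encoder.layers.0.attention.q_lin.weight"
#       PATTERNS pass:   attention -> attn, q_lin -> q_proj
#       encoder branch:  .attn -> .self_attn
#     => "encoder.layers.0.self_attn.q_proj.weight"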
def rename_layernorm_keys ( sd : dict ):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''' , '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str , config_json_path : str ):
    model = torch.load(checkpoint_path , map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
a_ : Any = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
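# Example invocation (sketch; the script name and all paths are placeholders):
#     python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin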
'''simple docstring'''
a_ : Optional[int] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
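# Sketch of how a pinned-dependency table like this is typically consumed
# (illustrative names; this mirrors, but does not reproduce, diffusers' setup tooling):
#     deps = a_
#     deps["torch"]  # -> "torch>=1.4"
#     install_requires = [deps[name] for name in ("torch", "numpy", "Pillow")]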
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir : Path , src_lang : str , tgt_lang : str , model_name : str ):
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(f'Generating {path}' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(__magic_name__ )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path : str , mobilebert_config_file : str , pytorch_dump_path : str ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[int] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
def print_max_activities ( start : list[int] , finish : list[int] ):
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
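# Trace for the sample inputs above (finish times already sorted, which the
# greedy step relies on):
#     pick 0 (finish 2); 3 >= 2 -> pick 1 (finish 4); 0 < 4 skip 2;
#     5 >= 4 -> pick 3 (finish 7); 8 >= 7 -> pick 4 (finish 9); 5 < 9 skip 5.
# Printed selection: 0,1,3,4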
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["""DPTFeatureExtractor"""]
    _import_structure["image_processing_dpt"] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
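# Minimal sketch of the lazy-import idea used above (a simplified stand-in,
# not transformers' actual _LazyModule implementation):
import importlib
class _LazyProxy:
    """Defers the real import until an attribute is first accessed."""
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None
    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)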
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
        return AudioPipelineOutput(audios=A )
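# Round-trip sanity check for the feature scaling above, in plain numpy
# (min/max mirror the constructor: log(1e-5) and 4.0; output_range (-1, 1)):
import numpy as np
min_v, max_v = np.log(1e-5), 4.0
x = np.linspace(min_v, max_v, 5)
z = (x - min_v) / (max_v - min_v)                       # to [0, 1]
y = z * 2.0 - 1.0                                       # scale_features -> [-1, 1]
x_back = ((y + 1.0) / 2.0) * (max_v - min_v) + min_v    # scale_to_features inverse
assert np.allclose(x, x_back)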
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance ( vector_a : Vector , vector_b : Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def euclidean_distance_no_np ( vector_a : Vector , vector_b : Vector ) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark ( ) -> None:
        from timeit import timeit
        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a_ : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_a = MBartTokenizer
_a = MBartTokenizerFast
_a = True
_a = True
def __A ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ = MBartTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = MBartTokenizer(lowercase__ , keep_accents=lowercase__ )
__magic_name__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__magic_name__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__magic_name__ = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__magic_name__ = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__magic_name__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__magic_name__ = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__magic_name__ = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = tokenizer_r.save_pretrained(lowercase__ )
__magic_name__ = tokenizer_p.save_pretrained(lowercase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__magic_name__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowercase__ , lowercase__ )
# Checks everything loads correctly in the same way
__magic_name__ = tokenizer_r.from_pretrained(lowercase__ )
__magic_name__ = tokenizer_p.from_pretrained(lowercase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase__ , lowercase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase__ )
# Save tokenizer rust, legacy_format=True
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = tokenizer_r.save_pretrained(lowercase__ , legacy_format=lowercase__ )
__magic_name__ = tokenizer_p.save_pretrained(lowercase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase__ , lowercase__ )
# Checks everything loads correctly in the same way
__magic_name__ = tokenizer_r.from_pretrained(lowercase__ )
__magic_name__ = tokenizer_p.from_pretrained(lowercase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase__ , lowercase__ ) )
shutil.rmtree(lowercase__ )
# Save tokenizer rust, legacy_format=False
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = tokenizer_r.save_pretrained(lowercase__ , legacy_format=lowercase__ )
__magic_name__ = tokenizer_p.save_pretrained(lowercase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__magic_name__ = tokenizer_r.from_pretrained(lowercase__ )
__magic_name__ = tokenizer_p.from_pretrained(lowercase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase__ , lowercase__ ) )
shutil.rmtree(lowercase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
_a = """facebook/mbart-large-en-ro"""
_a = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
_a = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
_a = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def __A ( cls ) -> Dict:
'''simple docstring'''
__magic_name__ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
__magic_name__ = 1
return cls
def __A ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase__ )
def __A ( self ) -> Dict:
'''simple docstring'''
self.assertIn(lowercase__ , self.tokenizer.all_special_ids )
__magic_name__ = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__magic_name__ = self.tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
__magic_name__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
self.assertNotIn(self.tokenizer.eos_token , lowercase__ )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , lowercase__ )
__magic_name__ = 10
__magic_name__ = self.tokenizer(lowercase__ , max_length=lowercase__ , truncation=lowercase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase__ )
self.assertEqual(len(lowercase__ ) , lowercase__ )
def __A ( self ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase__ )
__magic_name__ = MBartTokenizer.from_pretrained(lowercase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase__ )
@require_torch
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase__ , return_tensors='''pt''' )
__magic_name__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__magic_name__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__magic_name__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.tokenizer(self.src_text , padding=lowercase__ , truncation=lowercase__ , max_length=3 , return_tensors='''pt''' )
__magic_name__ = self.tokenizer(
text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ , max_length=10 , return_tensors='''pt''' )
__magic_name__ = targets["input_ids"]
__magic_name__ = shift_tokens_right(lowercase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowercase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 30_34, 2, 25_00_04]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , ) | 702 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[dim : dim * 2, :]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[:dim]
__magic_name__ = val[dim : dim * 2]
__magic_name__ = val[-dim:]
else:
__magic_name__ = val
return orig_state_dict
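# Illustrative sketch (not part of the original script): the qkv branch above
# slices a fused attention projection of shape (3 * dim, dim) into equal
# query/key/value blocks. The names and the dimension below are made-up
# example values.
_demo_dim = 4
_demo_qkv = torch.randn(3 * _demo_dim, _demo_dim)
_demo_q = _demo_qkv[:_demo_dim, :]
_demo_k = _demo_qkv[_demo_dim : _demo_dim * 2, :]
_demo_v = _demo_qkv[-_demo_dim:, :]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_dim, _demo_dim)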
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 0 |
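# Hedged usage sketch: the script name and local paths below are hypothetical;
# the flag names match the argparse definitions above.
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim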
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = load_tool('''text-to-speech''' )
self.tool.setup()
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = self.tool('''hey''' )
__magic_name__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = self.tool(text='''hey''' )
__magic_name__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) ) | 703 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 0 |
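# Self-contained sketch of the signature-based grouping above, using a
# hand-made word list instead of words.txt (the words are illustrative only).
_demo_index = collections.defaultdict(list)
for _demo_word in ["listen", "silent", "enlist", "google"]:
    _demo_index["".join(sorted(_demo_word))].append(_demo_word)
assert _demo_index["eilnst"] == ["listen", "silent", "enlist"]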
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Any:
'''simple docstring'''
debug_launcher(test_script.main )
def __A ( self ) -> int:
'''simple docstring'''
debug_launcher(test_ops.main ) | 704 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis functions must sum to 1 for this to be a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 0 |
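# Standalone sketch of the basis-function property asserted above: for a
# degree-2 curve at t = 0.5 the Bernstein weights are (0.25, 0.5, 0.25) and
# sum to 1. The values are chosen purely for illustration.
_demo_t, _demo_degree = 0.5, 2
_demo_basis = [
    comb(_demo_degree, i) * ((1 - _demo_t) ** (_demo_degree - i)) * (_demo_t**i)
    for i in range(_demo_degree + 1)
]
assert _demo_basis == [0.25, 0.5, 0.25]
assert round(sum(_demo_basis), 5) == 1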
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a_ : Any = re.compile(r'\b(a|an|the)\b', re.UNICODE)
a_ : List[str] = None
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__snake_case , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__snake_case , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__magic_name__ = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
def remove_articles(snake_case_ : Tuple ):
return ARTICLES_REGEX.sub(''' ''' , __snake_case )
def white_space_fix(snake_case_ : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(snake_case_ : Optional[Any] ):
__magic_name__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(snake_case_ : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if not s:
return []
return normalize_answer(__snake_case ).split()
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : str ):
return int(normalize_answer(__snake_case ) == normalize_answer(__snake_case ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Optional[int] ):
__magic_name__ = get_tokens(__snake_case )
__magic_name__ = get_tokens(__snake_case )
__magic_name__ = collections.Counter(__snake_case ) & collections.Counter(__snake_case )
__magic_name__ = sum(common.values() )
if len(__snake_case ) == 0 or len(__snake_case ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__magic_name__ = 1.0 * num_same / len(__snake_case )
__magic_name__ = 1.0 * num_same / len(__snake_case )
__magic_name__ = (2 * precision * recall) / (precision + recall)
return fa
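# Worked, self-contained example of the token-overlap F1 above (the token
# lists are made up): 2 shared tokens, precision 2/3, recall 2/2, F1 = 0.8.
_demo_gold = ["cat", "sat"]
_demo_pred = ["cat", "sat", "down"]
_demo_common = collections.Counter(_demo_gold) & collections.Counter(_demo_pred)
_demo_num_same = sum(_demo_common.values())
_demo_p = _demo_num_same / len(_demo_pred)
_demo_r = _demo_num_same / len(_demo_gold)
assert abs((2 * _demo_p * _demo_r) / (_demo_p + _demo_r) - 0.8) < 1e-9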
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
__magic_name__ = {}
__magic_name__ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__magic_name__ = qa['''id''']
__magic_name__ = [t for t in qa['''answers''']['''text'''] if normalize_answer(__snake_case )]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
__magic_name__ = ['''''']
if qid not in preds:
print(f'Missing prediction for {qid}' )
continue
__magic_name__ = preds[qid]
# Take max over all gold answers
__magic_name__ = max(compute_exact(__snake_case , __snake_case ) for a in gold_answers )
__magic_name__ = max(compute_fa(__snake_case , __snake_case ) for a in gold_answers )
return exact_scores, fa_scores
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Dict ):
__magic_name__ = {}
for qid, s in scores.items():
__magic_name__ = na_probs[qid] > na_prob_thresh
if pred_na:
__magic_name__ = float(not qid_to_has_ans[qid] )
else:
__magic_name__ = s
return new_scores
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[int]=None ):
if not qid_list:
__magic_name__ = len(__snake_case )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__magic_name__ = len(__snake_case )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Tuple ):
for k in new_eval:
__magic_name__ = new_eval[k]
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Any , snake_case_ : Optional[Any] ):
plt.step(__snake_case , __snake_case , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__snake_case , __snake_case , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__snake_case )
plt.savefig(__snake_case )
plt.clf()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : int=None , snake_case_ : Tuple=None ):
__magic_name__ = sorted(__snake_case , key=lambda snake_case_ : na_probs[k] )
__magic_name__ = 0.0
__magic_name__ = 1.0
__magic_name__ = 0.0
__magic_name__ = [1.0]
__magic_name__ = [0.0]
__magic_name__ = 0.0
for i, qid in enumerate(__snake_case ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__magic_name__ = true_pos / float(i + 1 )
__magic_name__ = true_pos / float(__snake_case )
if i == len(__snake_case ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__snake_case )
recalls.append(__snake_case )
if out_image:
plot_pr_curve(__snake_case , __snake_case , __snake_case , __snake_case )
return {"ap": 100.0 * avg_prec}
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : int ):
if out_image_dir and not os.path.exists(__snake_case ):
os.makedirs(__snake_case )
__magic_name__ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__magic_name__ = make_precision_recall_eval(
__snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__magic_name__ = make_precision_recall_eval(
__snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__magic_name__ = {k: float(__snake_case ) for k, v in qid_to_has_ans.items()}
__magic_name__ = make_precision_recall_eval(
__snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__snake_case , __snake_case , '''pr_exact''' )
merge_eval(__snake_case , __snake_case , '''pr_f1''' )
merge_eval(__snake_case , __snake_case , '''pr_oracle''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : int ):
if not qid_list:
return
__magic_name__ = [na_probs[k] for k in qid_list]
__magic_name__ = np.ones_like(__snake_case ) / float(len(__snake_case ) )
plt.hist(__snake_case , weights=__snake_case , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(__snake_case , f'na_prob_hist_{name}.png' ) )
plt.clf()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict , snake_case_ : Dict , snake_case_ : List[str] ):
__magic_name__ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__magic_name__ = num_no_ans
__magic_name__ = cur_score
__magic_name__ = 0.0
__magic_name__ = sorted(__snake_case , key=lambda snake_case_ : na_probs[k] )
for i, qid in enumerate(__snake_case ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__magic_name__ = scores[qid]
else:
if preds[qid]:
__magic_name__ = -1
else:
__magic_name__ = 0
cur_score += diff
if cur_score > best_score:
__magic_name__ = cur_score
__magic_name__ = na_probs[qid]
return 100.0 * best_score / len(__snake_case ), best_thresh
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : str ):
__magic_name__ , __magic_name__ = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__ , __magic_name__ = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__ = best_exact
__magic_name__ = exact_thresh
__magic_name__ = best_fa
__magic_name__ = fa_thresh
def _SCREAMING_SNAKE_CASE ( ):
with open(OPTS.data_file ) as f:
__magic_name__ = json.load(__snake_case )
__magic_name__ = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__magic_name__ = json.load(__snake_case )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__magic_name__ = json.load(__snake_case )
else:
__magic_name__ = {k: 0.0 for k in preds}
__magic_name__ = make_qid_to_has_ans(__snake_case ) # maps qid to True/False
__magic_name__ = [k for k, v in qid_to_has_ans.items() if v]
__magic_name__ = [k for k, v in qid_to_has_ans.items() if not v]
__magic_name__ , __magic_name__ = get_raw_scores(__snake_case , __snake_case )
__magic_name__ = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh )
__magic_name__ = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh )
__magic_name__ = make_eval_dict(__snake_case , __snake_case )
if has_ans_qids:
__magic_name__ = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case )
merge_eval(__snake_case , __snake_case , '''HasAns''' )
if no_ans_qids:
__magic_name__ = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case )
merge_eval(__snake_case , __snake_case , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , OPTS.out_image_dir )
histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__snake_case , __snake_case )
else:
print(json.dumps(__snake_case , indent=2 ) )
if __name__ == "__main__":
a_ : Dict = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main() | 705 |
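# Hedged usage sketch (the script and file names are hypothetical; the flags
# match the argparse definitions above):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --verbose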
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 0 |
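# Self-contained sketch of the validation above, with the pattern inlined so
# it runs independently of the (renamed) helper; the phone numbers are
# made-up examples.
_demo_pattern = re.compile(
    r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
assert _demo_pattern.search('''0718382399''' )
assert _demo_pattern.search('''+94773283048''' )
assert not _demo_pattern.search('''0912343221''' )  # second digit must be 7 for a mobile number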
import os
# All paths below assume that you run this script from the root of the repo with the command
# python utils/check_doctest_list.py
a_ : List[Any] = '''.'''
if __name__ == "__main__":
a_ : List[str] = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
a_ : Dict = []
a_ : Dict = []
with open(doctest_file_path) as fp:
for line in fp:
a_ : Optional[Any] = line.strip()
a_ : Tuple = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
a_ : Optional[Any] = '''\n'''.join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 706 |
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 0 |