code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _lowercase( __a : int , __a : int , __a : int , __a : int , __a : int , __a : int ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
a__ =ksize + 1
a__ =np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__a ):
for x in range(__a ):
# distance from center
a__ =x - ksize // 2
a__ =y - ksize // 2
# degree to radiant
a__ =theta / 180 * np.pi
a__ =np.cos(_theta )
a__ =np.sin(_theta )
# get kernel x
a__ =cos_theta * px + sin_theta * py
# get kernel y
a__ =-sin_theta * px + cos_theta * py
# fill kernel
a__ =np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_lowerCAmelCase: Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_lowerCAmelCase: Tuple = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_lowerCAmelCase: Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_lowerCAmelCase: Any = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_lowerCAmelCase: Tuple = out / out.max() * 255
_lowerCAmelCase: int = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 20 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ : List[str] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ : Dict = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
UpperCAmelCase_ : Any = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __A ( UpperCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = NllbTokenizer
UpperCamelCase = []
UpperCamelCase = []
def __init__( self :Any , __snake_case :Union[str, Any]=None , __snake_case :Optional[int]=None , __snake_case :str="<s>" , __snake_case :Union[str, Any]="</s>" , __snake_case :Optional[int]="</s>" , __snake_case :Optional[Any]="<s>" , __snake_case :List[str]="<unk>" , __snake_case :List[str]="<pad>" , __snake_case :List[Any]="<mask>" , __snake_case :List[Any]=None , __snake_case :Any=None , __snake_case :Optional[Any]=None , __snake_case :Optional[Any]=False , **__snake_case :Optional[Any] , ):
'''simple docstring'''
__magic_name__ : Any =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
__magic_name__ : Any =legacy_behaviour
super().__init__(
vocab_file=__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , legacy_behaviour=__snake_case , **__snake_case , )
__magic_name__ : Optional[int] =vocab_file
__magic_name__ : List[str] =False if not self.vocab_file else True
__magic_name__ : str =FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
__magic_name__ : Union[str, Any] ={
lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__magic_name__ : Optional[Any] =src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Union[str, Any] =self.convert_tokens_to_ids(self._src_lang )
__magic_name__ : Any =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A__ ( self :int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : str =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A__ ( self :List[Any] , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A__ ( self :str , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =[self.sep_token_id]
__magic_name__ : List[str] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self :List[Any] , __snake_case :int , __snake_case :str , __snake_case :Optional[str] , __snake_case :Optional[str] , **__snake_case :Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__ : str =src_lang
__magic_name__ : Any =self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
__magic_name__ : Union[str, Any] =self.convert_tokens_to_ids(__snake_case )
__magic_name__ : Tuple =tgt_lang_id
return inputs
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :str = "eng_Latn" , __snake_case :Optional[List[str]] = None , __snake_case :str = "fra_Latn" , **__snake_case :List[Any] , ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =src_lang
__magic_name__ : List[Any] =tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def A__ ( self :str ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A__ ( self :Optional[int] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ ( self :Tuple , __snake_case :Any ):
'''simple docstring'''
__magic_name__ : Dict =self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
__magic_name__ : Any =[]
__magic_name__ : str =[self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] =[self.cur_lang_code]
__magic_name__ : Tuple =[self.eos_token_id]
__magic_name__ : int =self.convert_ids_to_tokens(self.prefix_tokens )
__magic_name__ : Dict =self.convert_ids_to_tokens(self.suffix_tokens )
__magic_name__ : Union[str, Any] =processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A__ ( self :Optional[int] , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
__magic_name__ : Any =[]
__magic_name__ : Optional[Any] =[self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : List[Any] =[self.cur_lang_code]
__magic_name__ : Dict =[self.eos_token_id]
__magic_name__ : Dict =self.convert_ids_to_tokens(self.prefix_tokens )
__magic_name__ : List[str] =self.convert_ids_to_tokens(self.suffix_tokens )
__magic_name__ : Optional[Any] =processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A__ ( self :Union[str, Any] , __snake_case :str , __snake_case :Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
return
__magic_name__ : str =os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 21 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : list[tuple[float, float]] ):
_lowerCamelCase : Tuple = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowerCamelCase : int = len(__A ) - 1
def lowerCamelCase_ ( self : Optional[int],__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree,__A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__A ),5 ) == 1
return output_values
def lowerCamelCase_ ( self : int,__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : List[Any] = self.basis_function(__A )
_lowerCamelCase : str = 0.0
_lowerCamelCase : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCamelCase_ ( self : Optional[Any],__A : float = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
_lowerCamelCase : list[float] = [] # x coordinates of points to plot
_lowerCamelCase : list[float] = [] # y coordinates of points to plot
_lowerCamelCase : Tuple = 0.0
while t <= 1:
_lowerCamelCase : str = self.bezier_curve_function(__A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_lowerCamelCase : List[str] = [i[0] for i in self.list_of_points]
_lowerCamelCase : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
__A,__A,color="blue",label="Curve of Degree " + str(self.degree ),)
plt.scatter(__A,__A,color="red",label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_snake_case : List[Any] = logging.get_logger('transformers.models.speecht5')
_snake_case : List[Any] = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
_snake_case : Union[str, Any] = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
_snake_case : Tuple = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
_snake_case : Optional[int] = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
_snake_case : List[Any] = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
_snake_case : List[str] = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
_snake_case : Tuple = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
_snake_case : str = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
_snake_case : str = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_snake_case : List[str] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_snake_case : Dict = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_snake_case : Optional[int] = []
_snake_case : List[Any] = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
_snake_case : List[str] = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
_snake_case : Tuple = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
_snake_case : Dict = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
for attribute in key.split('''.''' ):
_a = getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
_a = getattr(UpperCamelCase , UpperCamelCase ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
else:
_a = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : str ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def snake_case_ (UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple ):
'''simple docstring'''
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase , UpperCamelCase ):
logger.info(f'{name} was ignored' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(UpperCamelCase )[0].split('''.''' )[-2]
_a = mapped_key.replace('''*''' , UpperCamelCase )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "bias" in name:
_a = '''bias'''
elif "weight" in name:
_a = '''weight'''
elif "running_mean" in name:
_a = '''running_mean'''
elif "running_var" in name:
_a = '''running_var'''
elif "num_batches_tracked" in name:
_a = '''num_batches_tracked'''
else:
_a = None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : int ):
'''simple docstring'''
_a = full_name.split('''conv_layers.''' )[-1]
_a = name.split('''.''' )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCamelCase )
@torch.no_grad()
def snake_case_ (UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any=None , UpperCamelCase : Any=None , ):
'''simple docstring'''
if config_path is not None:
_a = SpeechTaConfig.from_pretrained(UpperCamelCase )
else:
_a = SpeechTaConfig()
if task == "s2t":
_a = config.max_text_positions
_a = SpeechTaForSpeechToText(UpperCamelCase )
elif task == "t2s":
_a = 1876
_a = 600
_a = config.max_speech_positions
_a = SpeechTaForTextToSpeech(UpperCamelCase )
elif task == "s2s":
_a = 1876
_a = config.max_speech_positions
_a = SpeechTaForSpeechToSpeech(UpperCamelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
_a = SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_a = AddedToken('''<mask>''' , lstrip=UpperCamelCase , rstrip=UpperCamelCase )
_a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_a = SpeechTaFeatureExtractor()
_a = SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
_a = torch.load(UpperCamelCase )
recursively_load_weights(fairseq_checkpoint['''model'''] , UpperCamelCase , UpperCamelCase )
model.save_pretrained(UpperCamelCase )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(UpperCamelCase )
model.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_snake_case : int = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 22 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
def _snake_case (__lowercase , __lowercase):
UpperCamelCase_ = nn.functional.normalize(__lowercase)
UpperCamelCase_ = nn.functional.normalize(__lowercase)
return torch.mm(__lowercase , normalized_text_embeds.t())
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = CLIPConfig
A_ = ["""CLIPEncoderLayer"""]
def __init__( self , _UpperCAmelCase ) -> Dict:
super().__init__(_UpperCAmelCase )
UpperCamelCase_ = CLIPVisionModel(config.vision_config )
UpperCamelCase_ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
UpperCamelCase_ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
UpperCamelCase_ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
UpperCamelCase_ = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
UpperCamelCase_ = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = self.vision_model(_UpperCAmelCase )[1] # pooled_output
UpperCamelCase_ = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase_ = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
UpperCamelCase_ = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
UpperCamelCase_ = []
UpperCamelCase_ = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
UpperCamelCase_ = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase_ = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
UpperCamelCase_ = special_cos_dist[i][concept_idx]
UpperCamelCase_ = self.special_care_embeds_weights[concept_idx].item()
UpperCamelCase_ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
UpperCamelCase_ = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
UpperCamelCase_ = cos_dist[i][concept_idx]
UpperCamelCase_ = self.concept_embeds_weights[concept_idx].item()
UpperCamelCase_ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
UpperCamelCase_ = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = self.vision_model(_UpperCAmelCase )[1] # pooled_output
UpperCamelCase_ = self.visual_projection(_UpperCAmelCase )
UpperCamelCase_ = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
UpperCamelCase_ = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase_ = 0.0
UpperCamelCase_ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
UpperCamelCase_ = torch.any(special_scores > 0 , dim=1 )
UpperCamelCase_ = special_care * 0.0_1
UpperCamelCase_ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
UpperCamelCase_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
UpperCamelCase_ = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 23 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
a_ = parser.parse_args()
a_ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a_ = CLIPImageProcessor()
a_ = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
a_ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 25 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
| 44 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__UpperCamelCase = datasets.utils.logging.get_logger(__name__)
__UpperCamelCase = ["names", "prefix"]
__UpperCamelCase = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
__UpperCamelCase = ["encoding_errors", "on_bad_lines"]
__UpperCamelCase = ["date_format"]
@dataclass
class _A ( datasets.BuilderConfig ):
lowercase__: str = ","
lowercase__: Optional[str] = None
lowercase__: Optional[Union[int, List[int], str]] = "infer"
lowercase__: Optional[List[str]] = None
lowercase__: Optional[List[str]] = None
lowercase__: Optional[Union[int, str, List[int], List[str]]] = None
lowercase__: Optional[Union[List[int], List[str]]] = None
lowercase__: Optional[str] = None
lowercase__: bool = True
lowercase__: Optional[Literal["c", "python", "pyarrow"]] = None
lowercase__: Dict[Union[int, str], Callable[[Any], Any]] = None
lowercase__: Optional[list] = None
lowercase__: Optional[list] = None
lowercase__: bool = False
lowercase__: Optional[Union[int, List[int]]] = None
lowercase__: Optional[int] = None
lowercase__: Optional[Union[str, List[str]]] = None
lowercase__: bool = True
lowercase__: bool = True
lowercase__: bool = False
lowercase__: bool = True
lowercase__: Optional[str] = None
lowercase__: str = "."
lowercase__: Optional[str] = None
lowercase__: str = '"'
lowercase__: int = 0
lowercase__: Optional[str] = None
lowercase__: Optional[str] = None
lowercase__: Optional[str] = None
lowercase__: Optional[str] = None
lowercase__: bool = True
lowercase__: bool = True
lowercase__: int = 0
lowercase__: bool = True
lowercase__: bool = False
lowercase__: Optional[str] = None
lowercase__: int = 10000
lowercase__: Optional[datasets.Features] = None
lowercase__: Optional[str] = "strict"
lowercase__: Literal["error", "warn", "skip"] = "error"
lowercase__: Optional[str] = None
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.delimiter is not None:
__snake_case : Any = self.delimiter
if self.column_names is not None:
__snake_case : Optional[Any] = self.column_names
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : str = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __magic_name__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _A ( datasets.ArrowBasedBuilder ):
lowercase__: List[Any] = CsvConfig
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__snake_case : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__magic_name__ , (str, list, tuple) ):
__snake_case : int = data_files
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = [files]
__snake_case : Tuple = [dl_manager.iter_files(__magic_name__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__snake_case : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[Any] = [files]
__snake_case : Optional[int] = [dl_manager.iter_files(__magic_name__ ) for file in files]
splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) )
return splits
def lowercase__ ( self : Optional[Any] , __magic_name__ : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
__snake_case : Optional[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(__magic_name__ ) for feature in self.config.features.values() ):
# cheaper cast
__snake_case : Union[str, Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__magic_name__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__snake_case : Dict = table_cast(__magic_name__ , __magic_name__ )
return pa_table
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[str] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__snake_case : Union[str, Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__magic_name__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
__snake_case : Optional[int] = pd.read_csv(__magic_name__ , iterator=__magic_name__ , dtype=__magic_name__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__magic_name__ ):
__snake_case : List[str] = pa.Table.from_pandas(__magic_name__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__magic_name__ )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}''' )
raise
| 26 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__A : List[Any] = logging.getLogger(__name__)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE = 10 , _SCREAMING_SNAKE_CASE = 2 ) -> List[Any]:
"""simple docstring"""
def get_dataset(_SCREAMING_SNAKE_CASE ):
_A = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_SCREAMING_SNAKE_CASE , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_A = get_dataset(_SCREAMING_SNAKE_CASE )
_A = get_dataset(_SCREAMING_SNAKE_CASE )
_A = DataLoader(_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , num_workers=4 )
_A = DataLoader(_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Any:
"""simple docstring"""
_A = []
for epoch in range(_SCREAMING_SNAKE_CASE ):
# Train quickly
model.train()
for batch in dataloader:
_A, _A = batch
_A = model(_SCREAMING_SNAKE_CASE )
_A = torch.nn.functional.mse_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_A = nn.Parameter(torch.randn(1 ) )
_A = nn.Parameter(torch.randn(1 ) )
def lowerCAmelCase__ ( self , snake_case_ ):
return x * self.a + self.b
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A = DummyModel()
_A = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_A, _A = dummy_dataloaders()
_A = ProjectConfiguration(total_limit=1 , project_dir=snake_case_ , automatic_checkpoint_naming=snake_case_ )
# Train baseline
_A = Accelerator(project_config=snake_case_ )
_A, _A, _A, _A = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowerCAmelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A = DummyModel()
_A = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_A, _A = dummy_dataloaders()
# Train baseline
_A = Accelerator()
_A, _A, _A, _A = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
_A = os.path.join(snake_case_ , 'initial' )
accelerator.save_state(snake_case_ )
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
_A = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
# Train partially
set_seed(42 )
_A = DummyModel()
_A = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_A, _A = dummy_dataloaders()
_A = Accelerator()
_A, _A, _A, _A = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(snake_case_ )
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
_A = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
_A = os.path.join(snake_case_ , 'checkpoint' )
accelerator.save_state(snake_case_ )
# Load everything back in and make sure all states work
accelerator.load_state(snake_case_ )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A = DummyModel()
_A = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_A, _A = dummy_dataloaders()
_A = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
_A = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_A, _A, _A, _A = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
_A = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
# Train partially
set_seed(42 )
_A = DummyModel()
_A = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_A, _A = dummy_dataloaders()
_A = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case_ )
_A = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_A, _A, _A, _A = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(os.path.join(snake_case_ , 'checkpoints' , 'checkpoint_0' ) )
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
_A = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_A), (_A)) = model.a.item(), model.b.item()
_A = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
_A = torch.tensor([1, 2, 3] )
_A = torch.tensor([2, 3, 4] )
_A = DummyModel()
_A = torch.optim.Adam(net.parameters() )
_A = Accelerator()
with self.assertRaises(snake_case_ ) as ve:
accelerator.register_for_checkpointing(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_A = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def lowerCAmelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A = DummyModel()
_A = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_A = torch.optim.lr_scheduler.StepLR(snake_case_ , step_size=1 , gamma=0.99 )
_A, _A = dummy_dataloaders()
_A = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
_A = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_A, _A, _A, _A, _A = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
_A = scheduler.state_dict()
train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(snake_case_ , scheduler.state_dict() )
def lowerCAmelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A = DummyModel()
_A = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ , total_limit=2 )
# Train baseline
_A = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_A = accelerator.prepare(snake_case_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(snake_case_ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def lowerCAmelCase__ ( self ):
_A = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
__A : Tuple = "/tmp/accelerate/state_checkpointing"
__A : List[str] = DummyModel()
__A : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__A : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__A , __A : Dict = dummy_dataloaders()
__A : Any = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__A : Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__A , __A , __A , __A , __A : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__A , __A : Optional[int] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__A : Optional[int] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
__A : str = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
__A : int = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
__A : Tuple = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 27 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
| 44 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = MgpstrTokenizer
A : Any = False
A : Any = {}
A : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
SCREAMING_SNAKE_CASE : List[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'tester'
SCREAMING_SNAKE_CASE : List[Any] = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode([special_token], add_special_tokens=A )
self.assertEqual(len(A ), 1 )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(A, skip_special_tokens=A )
self.assertTrue(special_token not in decoded )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.get_input_output_texts(A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A )
SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(A, add_special_tokens=A )
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(A )
self.assertNotEqual(len(A ), 0 )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(A )
self.assertIsInstance(A, A )
self.assertEqual(text_a.replace(' ', '' ), A )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
| 28 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
lowerCamelCase_ = sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__a = 299_792_458
# Symbols
__a ,__a ,__a ,__a = symbols('ct x y z')
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return 1 / sqrt(1 - beta(_lowercase ) ** 2 )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return np.array(
[
[gamma(_lowercase ), -gamma(_lowercase ) * beta(_lowercase ), 0, 0],
[-gamma(_lowercase ) * beta(_lowercase ), gamma(_lowercase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCamelCase__ ( _lowercase , _lowercase = None ):
'''simple docstring'''
if event is None:
UpperCAmelCase_ : Optional[Any] = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(_lowercase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__a = transform(29_979_245)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__a = {ct: c, x: 1, y: 1, z: 1}
__a = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 30 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 0 |
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = {}
def lowerCAmelCase_ ( self : List[str] ):
print(self.vertex )
for i in self.vertex:
print(_lowerCAmelCase , ' -> ' , ' -> '.join([str(_lowerCAmelCase ) for j in self.vertex[i]] ) )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE_ = [to_vertex]
def lowerCAmelCase_ ( self : Optional[Any] ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE_ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE_ = True
print(_lowerCAmelCase , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 31 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
import numpy as np
import datasets
UpperCAmelCase_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
UpperCAmelCase_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
UpperCAmelCase_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def UpperCamelCase( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
# convert to numpy arrays
_UpperCAmelCase = np.array(_UpperCamelCase )
_UpperCAmelCase = np.array(_UpperCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
_UpperCAmelCase = X - np.mean(_UpperCamelCase )
_UpperCAmelCase = np.cov(reference_distribution.T )
try:
_UpperCAmelCase = np.linalg.inv(_UpperCamelCase )
except np.linalg.LinAlgError:
_UpperCAmelCase = np.linalg.pinv(_UpperCamelCase )
_UpperCAmelCase = np.dot(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = np.dot(_UpperCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 32 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether tp freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __magic_name__ :
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:List[str] ):
snake_case__ = data
snake_case__ = [0X67_452_301, 0XEF_CDA_B89, 0X98_BAD_CFE, 0X10_325_476, 0XC3_D2E_1F0]
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:Dict , _a:List[str] ):
return ((n << b) | (n >> (32 - b))) & 0XFF_FFF_FFF
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
snake_case__ = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Optional[Any] ):
snake_case__ = list(struct.unpack('''>16L''' , _a ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.padding()
snake_case__ = self.split_blocks()
for block in self.blocks:
snake_case__ = self.expand_block(_a )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case__ = (b & c) | ((~b) & d)
snake_case__ = 0X5A_827_999
elif 20 <= i < 40:
snake_case__ = b ^ c ^ d
snake_case__ = 0X6E_D9E_BA1
elif 40 <= i < 60:
snake_case__ = (b & c) | (b & d) | (c & d)
snake_case__ = 0X8F_1BB_CDC
elif 60 <= i < 80:
snake_case__ = b ^ c ^ d
snake_case__ = 0XCA_62C_1D6
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = (
self.rotate(_a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FFF_FFF,
a,
self.rotate(_a , 30 ),
c,
d,
)
snake_case__ = (
self.h[0] + a & 0XFF_FFF_FFF,
self.h[1] + b & 0XFF_FFF_FFF,
self.h[2] + c & 0XFF_FFF_FFF,
self.h[3] + d & 0XFF_FFF_FFF,
self.h[4] + e & 0XFF_FFF_FFF,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = B'''Test String'''
assert SHAaHash(__lowerCAmelCase ).final_hash() == hashlib.shaa(__lowerCAmelCase ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
snake_case__ = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
snake_case__ = parser.parse_args()
snake_case__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
snake_case__ = f.read()
else:
snake_case__ = bytes(__lowerCAmelCase , '''utf-8''' )
print(SHAaHash(__lowerCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 33 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
UpperCamelCase = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
UpperCamelCase = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] ,num_layers=predefined_args['''num_layers'''] ,units=predefined_args['''units'''] ,hidden_size=predefined_args['''hidden_size'''] ,max_length=predefined_args['''max_length'''] ,num_heads=predefined_args['''num_heads'''] ,scaled=predefined_args['''scaled'''] ,dropout=predefined_args['''dropout'''] ,output_attention=_lowercase ,output_all_encodings=_lowercase ,use_residual=predefined_args['''use_residual'''] ,activation=predefined_args.get('''activation''' ,'''gelu''' ) ,layer_norm_eps=predefined_args.get('''layer_norm_eps''' ,_lowercase ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
UpperCamelCase = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
UpperCamelCase = os.path.join(get_home_dir() ,'''models''' )
UpperCamelCase = _load_vocab(_lowercase ,_lowercase ,_lowercase ,cls=_lowercase )
UpperCamelCase = nlp.model.BERTModel(
_lowercase ,len(_lowercase ) ,units=predefined_args['''units'''] ,embed_size=predefined_args['''embed_size'''] ,embed_dropout=predefined_args['''embed_dropout'''] ,word_embed=predefined_args['''word_embed'''] ,use_pooler=_lowercase ,use_token_type_embed=_lowercase ,token_type_vocab_size=predefined_args['''token_type_vocab_size'''] ,use_classifier=_lowercase ,use_decoder=_lowercase ,)
original_bort.load_parameters(_lowercase ,cast_dtype=_lowercase ,ignore_extra=_lowercase )
UpperCamelCase = original_bort._collect_params_with_prefix()
# Build our config 🤗
UpperCamelCase = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(_lowercase ),
}
UpperCamelCase = BertConfig.from_dict(_lowercase )
UpperCamelCase = BertForMaskedLM(_lowercase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_lowercase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_lowercase ,_lowercase ):
UpperCamelCase = hf_param.shape
UpperCamelCase = to_torch(params[gluon_param] )
UpperCamelCase = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,'''word_embed.0.weight''' )
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,'''encoder.position_weight''' )
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,'''encoder.layer_norm.beta''' )
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,'''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
UpperCamelCase = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
UpperCamelCase = hf_bort_model.bert.encoder.layer[i]
# self attention
UpperCamelCase = layer.attention.self
UpperCamelCase = check_and_map_params(
self_attn.key.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
UpperCamelCase = check_and_map_params(
self_attn.key.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
UpperCamelCase = check_and_map_params(
self_attn.query.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
UpperCamelCase = check_and_map_params(
self_attn.query.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
UpperCamelCase = check_and_map_params(
self_attn.value.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
UpperCamelCase = check_and_map_params(
self_attn.value.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
UpperCamelCase = layer.attention.output
UpperCamelCase = check_and_map_params(
self_output.dense.bias ,f'encoder.transformer_cells.{i}.proj.bias' )
UpperCamelCase = check_and_map_params(
self_output.dense.weight ,f'encoder.transformer_cells.{i}.proj.weight' )
UpperCamelCase = check_and_map_params(
self_output.LayerNorm.bias ,f'encoder.transformer_cells.{i}.layer_norm.beta' )
UpperCamelCase = check_and_map_params(
self_output.LayerNorm.weight ,f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
UpperCamelCase = layer.intermediate
UpperCamelCase = check_and_map_params(
intermediate.dense.bias ,f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
UpperCamelCase = check_and_map_params(
intermediate.dense.weight ,f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
UpperCamelCase = layer.output
UpperCamelCase = check_and_map_params(
bert_output.dense.bias ,f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
UpperCamelCase = check_and_map_params(
bert_output.dense.weight ,f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
UpperCamelCase = check_and_map_params(
bert_output.LayerNorm.bias ,f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
UpperCamelCase = check_and_map_params(
bert_output.LayerNorm.weight ,f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
UpperCamelCase = RobertaTokenizer.from_pretrained('''roberta-base''' )
UpperCamelCase = tokenizer.encode_plus(_lowercase )['''input_ids''']
# Get gluon output
UpperCamelCase = mx.nd.array([input_ids] )
UpperCamelCase = original_bort(inputs=_lowercase ,token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_lowercase )
UpperCamelCase = BertModel.from_pretrained(_lowercase )
hf_bort_model.eval()
UpperCamelCase = tokenizer.encode_plus(_lowercase ,return_tensors='''pt''' )
UpperCamelCase = hf_bort_model(**_lowercase )[0]
UpperCamelCase = output_gluon[0].asnumpy()
UpperCamelCase = output_hf[0].detach().numpy()
UpperCamelCase = np.max(np.abs(hf_layer - gluon_layer ) ).item()
UpperCamelCase = np.allclose(_lowercase ,_lowercase ,atol=1e-3 )
if success:
print('''✔️ Both model do output the same tensors''' )
else:
print('''❌ Both model do **NOT** output the same tensors''' )
print('''Absolute difference is:''' ,_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 34 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase ( enum.Enum ):
lowerCamelCase : int = 0
lowerCamelCase : Tuple = 1
lowerCamelCase : List[str] = 2
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Dict = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self : Optional[int] , *_lowercase : Optional[Any] , **_lowercase : Dict ):
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
SCREAMING_SNAKE_CASE__ : Dict = None
if self.model.config.prefix is not None:
SCREAMING_SNAKE_CASE__ : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
SCREAMING_SNAKE_CASE__ : Any = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
SCREAMING_SNAKE_CASE__ : str = {**self._preprocess_params, **preprocess_params}
SCREAMING_SNAKE_CASE__ : str = {**self._forward_params, **forward_params}
def lowercase__ ( self : Optional[int] , _lowercase : Any=None , _lowercase : Optional[int]=None , _lowercase : List[Any]=None , _lowercase : List[Any]=None , _lowercase : Optional[Any]=None , _lowercase : Optional[int]=None , _lowercase : List[Any]=None , _lowercase : Tuple=None , **_lowercase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE__ : int = {}
if prefix is not None:
SCREAMING_SNAKE_CASE__ : Tuple = prefix
if prefix:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Dict = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
SCREAMING_SNAKE_CASE__ : Any = handle_long_generation
preprocess_params.update(_lowercase )
SCREAMING_SNAKE_CASE__ : int = generate_kwargs
SCREAMING_SNAKE_CASE__ : str = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
SCREAMING_SNAKE_CASE__ : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = ReturnType.TENSORS
if return_type is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
SCREAMING_SNAKE_CASE__ : int = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase__ ( self : Union[str, Any] , *_lowercase : Tuple , **_lowercase : Any ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self : List[str] , _lowercase : Tuple , **_lowercase : Dict ):
return super().__call__(_lowercase , **_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Optional[int] , _lowercase : int="" , _lowercase : int=None , **_lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text
if handle_long_generation == "hole":
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = generate_kwargs['''max_new_tokens''']
else:
SCREAMING_SNAKE_CASE__ : Any = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
SCREAMING_SNAKE_CASE__ : Any = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def lowercase__ ( self : Optional[Any] , _lowercase : Any , **_lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : str = model_inputs['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_inputs.get('''attention_mask''' , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
else:
SCREAMING_SNAKE_CASE__ : Dict = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : Tuple = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
SCREAMING_SNAKE_CASE__ : Optional[Any] = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
SCREAMING_SNAKE_CASE__ : Dict = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
SCREAMING_SNAKE_CASE__ : str = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
SCREAMING_SNAKE_CASE__ : Dict = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = generated_sequence.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Tuple = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowercase__ ( self : Optional[int] , _lowercase : Any , _lowercase : int=ReturnType.FULL_TEXT , _lowercase : int=True ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_outputs['''generated_sequence'''][0]
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs['''input_ids''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_outputs['''prompt_text''']
SCREAMING_SNAKE_CASE__ : Optional[int] = generated_sequence.numpy().tolist()
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE__ : Dict = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
else:
SCREAMING_SNAKE_CASE__ : List[Any] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
SCREAMING_SNAKE_CASE__ : Optional[int] = prompt_text + text[prompt_length:]
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text[prompt_length:]
SCREAMING_SNAKE_CASE__ : int = {'''generated_text''': all_text}
records.append(_lowercase )
return records
| 35 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
def lowercase ( __A : int = 100 ) -> int:
'''simple docstring'''
snake_case : Tuple = 0
snake_case : Tuple = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(_lowerCAmelCase ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _lowerCAmelCase : [int(_lowerCAmelCase ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 0 |
from math import sqrt
def UpperCamelCase_ ( __a ) -> int:
a__ : Union[str, Any] = 0
for i in range(1 , int(sqrt(__a ) + 1 ) ):
if n % i == 0 and i != sqrt(__a ):
total += i + n // i
elif i == sqrt(__a ):
total += i
return total - n
def UpperCamelCase_ ( __a = 10_000 ) -> int:
a__ : str = sum(
i
for i in range(1 , __a )
if sum_of_divisors(sum_of_divisors(__a ) ) == i and sum_of_divisors(__a ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 37 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A_ : Optional[int] = logging.get_logger(__name__)
A_ : List[str] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
A_ : List[Any] = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
A_ : Optional[Any] = {
"facebook/blenderbot_small-90M": 512,
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = BlenderbotSmallTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=__SCREAMING_SNAKE_CASE , merges=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , ) , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : str = add_prefix_space
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Union[str, Any] = [self.sep_token_id]
snake_case__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 38 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : Tuple,__A : Optional[int],__A : Optional[int]=1_0_2_4,__A : int=1_0_2_4,__A : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 44 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = "data2vec-text"
def __init__( self : Dict , _UpperCamelCase : int=3_0_5_2_2 , _UpperCamelCase : Dict=7_6_8 , _UpperCamelCase : Tuple=1_2 , _UpperCamelCase : List[Any]=1_2 , _UpperCamelCase : Tuple=3_0_7_2 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Any=5_1_2 , _UpperCamelCase : str=2 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=1 , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : Optional[Any]="absolute" , _UpperCamelCase : int=True , _UpperCamelCase : int=None , **_UpperCamelCase : Union[str, Any] , ) ->Dict:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = classifier_dropout
class snake_case_ ( __A ):
'''simple docstring'''
@property
def snake_case__( self : List[Any] ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 39 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 44 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCamelCase ( snake_case__ : List[Any] ) -> Optional[Any]:
if is_torch_version('<' , '2.0.0' ) or not hasattr(snake_case__ , '_dynamo' ):
return False
return isinstance(snake_case__ , torch._dynamo.eval_frame.OptimizedModule )
def UpperCamelCase ( snake_case__ : str , snake_case__ : bool = True ) -> Tuple:
UpperCamelCase : Tuple = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCamelCase : Optional[Any] = is_compiled_module(snake_case__ )
if is_compiled:
UpperCamelCase : List[str] = model
UpperCamelCase : Tuple = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : List[str] = model.module
if not keep_fpaa_wrapper:
UpperCamelCase : Optional[int] = getattr(snake_case__ , 'forward' )
UpperCamelCase : int = model.__dict__.pop('_original_forward' , snake_case__ )
if original_forward is not None:
while hasattr(snake_case__ , '__wrapped__' ):
UpperCamelCase : Any = forward.__wrapped__
if forward == original_forward:
break
UpperCamelCase : Any = forward
if getattr(snake_case__ , '_converted_to_transformer_engine' , snake_case__ ):
convert_model(snake_case__ , to_transformer_engine=snake_case__ )
if is_compiled:
UpperCamelCase : Tuple = model
UpperCamelCase : int = compiled_model
return model
def UpperCamelCase ( ) -> Any:
PartialState().wait_for_everyone()
def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Dict ) -> int:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(snake_case__ , snake_case__ )
elif PartialState().local_process_index == 0:
torch.save(snake_case__ , snake_case__ )
@contextmanager
def UpperCamelCase ( **snake_case__ : str ) -> str:
for key, value in kwargs.items():
UpperCamelCase : int = str(snake_case__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCamelCase ( snake_case__ : str ) -> Any:
if not hasattr(snake_case__ , '__qualname__' ) and not hasattr(snake_case__ , '__name__' ):
UpperCamelCase : Tuple = getattr(snake_case__ , '__class__' , snake_case__ )
if hasattr(snake_case__ , '__qualname__' ):
return obj.__qualname__
if hasattr(snake_case__ , '__name__' ):
return obj.__name__
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Any , snake_case__ : Optional[Any] ) -> Tuple:
for key, value in source.items():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Optional[Any] = destination.setdefault(snake_case__ , {} )
merge_dicts(snake_case__ , snake_case__ )
else:
UpperCamelCase : List[Any] = value
return destination
def UpperCamelCase ( snake_case__ : int = None ) -> bool:
if port is None:
UpperCamelCase : Union[str, Any] = 29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 40 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : int = ""
else:
_lowerCamelCase : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[str] = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ViTConfig()
_lowerCamelCase : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
_lowerCamelCase : str = int(vit_name[-9:-6] )
else:
_lowerCamelCase : List[Any] = 1000
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Any = "imagenet-1k-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = int(vit_name[-6:-4] )
_lowerCamelCase : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
_lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
else:
_lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
if base_model:
_lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 0 |
'''simple docstring'''
def _A ( ):
"""simple docstring"""
__lowercase = 0
for i in range(1 , 1001 ):
total += i**i
return str(A__ )[-10:]
if __name__ == "__main__":
print(solution())
| 41 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A_ ( _lowerCAmelCase : int = 5000 ):
"""simple docstring"""
_lowerCamelCase : Dict = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCAmelCase )]
for i, pentagonal_i in enumerate(_lowerCAmelCase ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : List[Any] = pentagonal_nums[j]
_lowerCamelCase : Any = pentagonal_i + pentagonal_j
_lowerCamelCase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCAmelCase ) and is_pentagonal(_lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
A_ = random.Random()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=1.0 ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> Dict:
if rng is None:
lowerCamelCase_ = global_rng
lowerCamelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=2000 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=16000 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_="hann_window" , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=7600 , SCREAMING_SNAKE_CASE_=1E-10 , SCREAMING_SNAKE_CASE_=True , ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = min_seq_length
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase_ = feature_size
lowerCamelCase_ = padding_value
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = do_normalize
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = hop_length
lowerCamelCase_ = win_length
lowerCamelCase_ = win_function
lowerCamelCase_ = fmin
lowerCamelCase_ = fmax
lowerCamelCase_ = mel_floor
lowerCamelCase_ = return_attention_mask
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> List[str]:
'''simple docstring'''
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
lowerCamelCase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> Tuple:
'''simple docstring'''
if equal_length:
lowerCamelCase_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = SpeechTaFeatureExtractionTester(self )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_ , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCamelCase_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test batched
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = range(800 , 1400 , 200 )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=1000 , padding='max_length' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=1000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=2000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = np.random.rand(100 ).astype(np.floataa )
lowerCamelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ = feature_extractor(audio_target=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test batched
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) for x, y in zip(SCREAMING_SNAKE_CASE_ , processed_features[input_name] ) ) )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase_ = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
from datasets import load_dataset
lowerCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCamelCase_ = ds.sort('id' ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = SpeechTaFeatureExtractor()
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , SCREAMING_SNAKE_CASE_ , atol=1E-6 ) )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = SpeechTaFeatureExtractor()
lowerCamelCase_ = feature_extractor(audio_target=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = ['''pixel_values''']
def __init__( self: List[str] , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 255 , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , **UpperCamelCase_: List[str] , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = size if size is not None else {'''shortest_edge''': 256}
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowercase__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase__ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = offset
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" in size:
lowercase__ = get_resize_output_image_size(UpperCamelCase_ , size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
elif "height" in size and "width" in size:
lowercase__ = (size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[int, float] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = image.astype(np.floataa )
if offset:
lowercase__ = image - (scale / 2)
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Any , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: ImageInput , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = None , UpperCamelCase_: float = None , UpperCamelCase_: bool = None , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowercase__ = to_numpy_array(UpperCamelCase_ )
if do_resize:
lowercase__ = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ )
if do_center_crop:
lowercase__ = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_ )
if do_rescale:
lowercase__ = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , offset=UpperCamelCase_ )
if do_normalize:
lowercase__ = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ )
lowercase__ = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ )
return image
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = None , UpperCamelCase_: float = None , UpperCamelCase_: bool = None , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_: Tuple , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = offset if offset is not None else self.offset
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase__ = make_batched(UpperCamelCase_ )
lowercase__ = [
[
self._preprocess_image(
image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , offset=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , )
for img in video
]
for video in videos
]
lowercase__ = {'''pixel_values''': videos}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 43 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : list[tuple[float, float]] ):
_lowerCamelCase : Tuple = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowerCamelCase : int = len(__A ) - 1
def lowerCamelCase_ ( self : Optional[int],__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree,__A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__A ),5 ) == 1
return output_values
def lowerCamelCase_ ( self : int,__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : List[Any] = self.basis_function(__A )
_lowerCamelCase : str = 0.0
_lowerCamelCase : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCamelCase_ ( self : Optional[Any],__A : float = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
_lowerCamelCase : list[float] = [] # x coordinates of points to plot
_lowerCamelCase : list[float] = [] # y coordinates of points to plot
_lowerCamelCase : Tuple = 0.0
while t <= 1:
_lowerCamelCase : str = self.bezier_curve_function(__A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_lowerCamelCase : List[str] = [i[0] for i in self.list_of_points]
_lowerCamelCase : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
__A,__A,color="blue",label="Curve of Degree " + str(self.degree ),)
plt.scatter(__A,__A,color="red",label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 0 |
from __future__ import annotations
def A ( lowercase__ : list[int] , lowercase__ : int ) -> list[int]:
UpperCamelCase__ :Tuple = 0
UpperCamelCase__ :Any = len(lowercase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCamelCase__ :Union[str, Any] = i + 1
else:
UpperCamelCase__ :Optional[int] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 45 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(_lowerCamelCase , x % y )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase = 20 ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = 1
for i in range(1 , n + 1 ):
_lowerCamelCase : Optional[Any] = lcm(_lowerCamelCase , _lowerCamelCase )
return g
if __name__ == "__main__":
print(f'''{solution() = }''')
| 46 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 0 |
import math
import tensorflow as tf
from packaging import version
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
__a : Any = tf.convert_to_tensor(lowerCamelCase_ )
__a : Any = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ):
__a : Optional[Any] = tf.convert_to_tensor(lowerCamelCase_ )
__a : Union[str, Any] = tf.cast(math.pi , x.dtype )
__a : str = tf.cast(0.044715 , x.dtype )
__a : int = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase_ , 3 )) ))
return x * cdf
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
__a : Union[str, Any] = tf.convert_to_tensor(lowerCamelCase_ )
return x * tf.tanh(tf.math.softplus(lowerCamelCase_ ) )
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ):
__a : List[str] = tf.convert_to_tensor(lowerCamelCase_ )
__a : int = tf.cast(0.044715 , x.dtype )
__a : List[Any] = tf.cast(0.7978845608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ):
__a : Dict = tf.convert_to_tensor(lowerCamelCase_ )
__a : int = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] ):
return tf.clip_by_value(_gelu(lowerCamelCase_ ) , -1_0 , 1_0 )
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any]=-1 ):
__a , __a : Optional[Any] = tf.split(lowerCamelCase_ , 2 , axis=lowerCamelCase_ )
return a * tf.math.sigmoid(lowerCamelCase_ )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return tf.keras.activations.gelu(lowerCamelCase_ , approximate=lowerCamelCase_ )
SCREAMING_SNAKE_CASE__ = tf.keras.activations.gelu
SCREAMING_SNAKE_CASE__ = approximate_gelu_wrap
else:
SCREAMING_SNAKE_CASE__ = _gelu
SCREAMING_SNAKE_CASE__ = _gelu_new
SCREAMING_SNAKE_CASE__ = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 47 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : pyspark.sql.DataFrame , __magic_name__ : Optional[NamedSplit] = None , __magic_name__ : Optional[Features] = None , __magic_name__ : bool = True , __magic_name__ : str = None , __magic_name__ : bool = False , __magic_name__ : str = None , __magic_name__ : bool = True , __magic_name__ : str = "arrow" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(
split=__magic_name__ , features=__magic_name__ , cache_dir=__magic_name__ , keep_in_memory=__magic_name__ , streaming=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = load_from_cache_file
lowerCAmelCase__ = file_format
lowerCAmelCase__ = Spark(
df=__magic_name__ , features=__magic_name__ , cache_dir=__magic_name__ , working_dir=__magic_name__ , **__magic_name__ , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__magic_name__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 48 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
| 44 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 0 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return number | (1 << position)
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return number & ~(1 << position)
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return number ^ (1 << position)
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return ((number >> position) & 1) == 1
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
| 44 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : int , a__ : bool , a__ : Optional[int] = None , a__ : Optional[int] = None ):
super().__init__()
UpperCAmelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCAmelCase = torch.zeros(a__ , a__ )
else:
UpperCAmelCase = None
UpperCAmelCase = torch.nn.Parameter(a__ )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
def __init__( self : List[str] , a__ : VQModel , a__ : CLIPTextModel , a__ : CLIPTokenizer , a__ : TransformeraDModel , a__ : VQDiffusionScheduler , a__ : LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=a__ , transformer=a__ , text_encoder=a__ , tokenizer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
def __snake_case ( self : Any , a__ : Dict , a__ : Optional[int] , a__ : Optional[Any] ):
UpperCAmelCase = len(a__ ) if isinstance(a__ , a__ ) else 1
# get prompt text embeddings
UpperCAmelCase = self.tokenizer(
a__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=a__ )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase = prompt_embeds.repeat_interleave(a__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(a__ , 1 , 1 )
else:
UpperCAmelCase = [''''''] * batch_size
UpperCAmelCase = text_input_ids.shape[-1]
UpperCAmelCase = self.tokenizer(
a__ , padding='''max_length''' , max_length=a__ , truncation=a__ , return_tensors='''pt''' , )
UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=a__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase = negative_prompt_embeds.shape[1]
UpperCAmelCase = negative_prompt_embeds.repeat(1 , a__ , 1 )
UpperCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] , a__ : Union[str, List[str]] , a__ : int = 100 , a__ : float = 5.0 , a__ : float = 1.0 , a__ : int = 1 , a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[str] = "pil" , a__ : bool = True , a__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a__ : int = 1 , ):
if isinstance(a__ , a__ ):
UpperCAmelCase = 1
elif isinstance(a__ , a__ ):
UpperCAmelCase = len(a__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(a__ )}" )
UpperCAmelCase = batch_size * num_images_per_prompt
UpperCAmelCase = guidance_scale > 1.0
UpperCAmelCase = self._encode_prompt(a__ , a__ , a__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a__ , a__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(a__ )}." )
# get the initial completely masked latents unless the user supplied it
UpperCAmelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCAmelCase = self.transformer.num_vector_embeds - 1
UpperCAmelCase = torch.full(a__ , a__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
UpperCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a__ , device=self.device )
UpperCAmelCase = self.scheduler.timesteps.to(self.device )
UpperCAmelCase = latents
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the sample if we are doing classifier free guidance
UpperCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCAmelCase = self.transformer(a__ , encoder_hidden_states=a__ , timestep=a__ ).sample
if do_classifier_free_guidance:
UpperCAmelCase, UpperCAmelCase = model_output.chunk(2 )
UpperCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(a__ , dim=1 , keepdim=a__ )
UpperCAmelCase = self.truncate(a__ , a__ )
# remove `log(0)`'s (`-inf`s)
UpperCAmelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(a__ , timestep=a__ , sample=a__ , generator=a__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a__ , a__ , a__ )
UpperCAmelCase = self.vqvae.config.vq_embed_dim
UpperCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCAmelCase = self.vqvae.quantize.get_codebook_entry(a__ , shape=a__ )
UpperCAmelCase = self.vqvae.decode(a__ , force_not_quantize=a__ ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
def __snake_case ( self : List[str] , a__ : torch.FloatTensor , a__ : float ):
UpperCAmelCase, UpperCAmelCase = torch.sort(a__ , 1 , descending=a__ )
UpperCAmelCase = torch.exp(a__ )
UpperCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , a__ )
UpperCAmelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCAmelCase = keep_mask[:, :-1, :]
UpperCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCAmelCase = log_p_x_0.clone()
UpperCAmelCase = -torch.inf # -inf = log(0)
return rv
| 51 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 0 |
"""simple docstring"""
from __future__ import annotations
A = list[list[int]]
# assigning initial values to the grid
A = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
A = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __A ( a_ :Matrix , a_ :int , a_ :int , a_ :int) -> bool:
for i in range(9):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3):
for j in range(3):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __A ( a_ :Matrix) -> tuple[int, int] | None:
for i in range(9):
for j in range(9):
if grid[i][j] == 0:
return i, j
return None
def __A ( a_ :Matrix) -> Matrix | None:
if location := find_empty_location(a_):
__a , __a : int = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10):
if is_safe(a_ , a_ , a_ , a_):
__a : Union[str, Any] = digit
if sudoku(a_) is not None:
return grid
__a : Optional[Any] = 0
return None
def __A ( a_ :Matrix) -> None:
for row in grid:
for cell in row:
print(a_ , end=''' ''')
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
A = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 52 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
import qiskit
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : int ):
__lowerCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
__lowerCAmelCase = qiskit.QuantumCircuit(lowerCAmelCase_, lowerCAmelCase_ )
# Map the quantum measurement to the classical bits
circuit.measure([0], [0] )
# Execute the circuit on the simulator
__lowerCAmelCase = qiskit.execute(lowerCAmelCase_, lowerCAmelCase_, shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 53 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 0 |
from __future__ import annotations
class A :
def __init__( self: Union[str, Any] , _lowerCAmelCase: int = 0 ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =key
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: int ) -> list[str]:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_lowerCAmelCase ) ^ key ) for ch in content]
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: str , _lowerCAmelCase: int ) -> list[str]:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_lowerCAmelCase ) ^ key ) for ch in content]
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: int = 0 ) -> str:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
UpperCAmelCase_ =""
for ch in content:
ans += chr(ord(_lowerCAmelCase ) ^ key )
return ans
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: int = 0 ) -> str:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
UpperCAmelCase_ =""
for ch in content:
ans += chr(ord(_lowerCAmelCase ) ^ key )
return ans
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: int = 0 ) -> bool:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
try:
with open(_lowerCAmelCase ) as fin, open("encrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_lowerCAmelCase , _lowerCAmelCase ) )
except OSError:
return False
return True
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: str , _lowerCAmelCase: int ) -> bool:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
try:
with open(_lowerCAmelCase ) as fin, open("decrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_lowerCAmelCase , _lowerCAmelCase ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(a_ , a_ ):
return 0
elif n == 2:
return 1
else:
__A = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
__A = 0
__A = 2
while digits < n:
index += 1
__A = len(str(fibonacci(a_ ) ) )
return index
def UpperCAmelCase ( a_ = 1_0_0_0 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(a_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether tp freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a : Union[str, Any] = 16
_a : Optional[Any] = 32
def _a (lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained('bert-base-cased' )
__snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowercase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__snake_case = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowercase__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__snake_case = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__snake_case = 1_6
elif accelerator.mixed_precision != "no":
__snake_case = 8
else:
__snake_case = None
return tokenizer.pad(
lowercase__ , padding='longest' , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors='pt' , )
# Instantiate dataloaders.
__snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
__snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a : Optional[int] = mocked_dataloaders # noqa: F811
def _a (lowercase__ : str , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowercase__ ) == "1":
__snake_case = 2
# Initialize accelerator
__snake_case = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__snake_case = config['lr']
__snake_case = int(config['num_epochs'] )
__snake_case = int(config['seed'] )
__snake_case = int(config['batch_size'] )
__snake_case = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__snake_case = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__snake_case = batch_size // MAX_GPU_BATCH_SIZE
__snake_case = MAX_GPU_BATCH_SIZE
set_seed(lowercase__ )
__snake_case , __snake_case = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__snake_case = model.to(accelerator.device )
# Instantiate optimizer
__snake_case = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
__snake_case = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__snake_case = model(**lowercase__ )
__snake_case = outputs.loss
__snake_case = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__snake_case = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__snake_case = model(**lowercase__ )
__snake_case = outputs.logits.argmax(dim=-1 )
__snake_case , __snake_case = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__snake_case = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__snake_case = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
__snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , lowercase__ )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=lowercase__ , default=lowercase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__snake_case = parser.parse_args()
__snake_case = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 56 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=1_8 , _lowerCamelCase=3_0 , _lowerCamelCase=4_0_0 , _lowerCamelCase=True , _lowerCamelCase=3_2 , _lowerCamelCase=True , ):
UpperCamelCase_: Any = parent
UpperCamelCase_: Optional[Any] = batch_size
UpperCamelCase_: List[str] = num_channels
UpperCamelCase_: Optional[Any] = image_size
UpperCamelCase_: Optional[int] = min_resolution
UpperCamelCase_: List[str] = max_resolution
UpperCamelCase_: Tuple = do_resize
UpperCamelCase_: Union[str, Any] = size_divisor
UpperCamelCase_: Optional[int] = do_rescale
def _a ( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : str =GLPNImageProcessor if is_vision_available() else None
def _a ( self ):
UpperCamelCase_: Tuple = GLPNImageProcessingTester(self )
@property
def _a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
UpperCamelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'size_divisor' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'resample' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'do_rescale' ) )
def _a ( self ):
pass
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_: List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_: Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 57 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = FunnelConfig.from_json_file(__UpperCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
snake_case_ : Dict = FunnelBaseModel(__UpperCamelCase ) if base_model else FunnelModel(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 58 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> int:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: Optional[int] =len(__a ), len(grid[0] )
if (
min(__a , __a ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowerCamelCase__: str =0
count += depth_first_search(__a , row + 1 , __a , __a )
count += depth_first_search(__a , row - 1 , __a , __a )
count += depth_first_search(__a , __a , col + 1 , __a )
count += depth_first_search(__a , __a , col - 1 , __a )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(_lowerCAmelCase ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _lowerCAmelCase : [int(_lowerCAmelCase ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 60 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict=7 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=18 , SCREAMING_SNAKE_CASE__ : List[str]=30 , SCREAMING_SNAKE_CASE__ : Dict=400 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=[0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , ) -> int:
lowerCAmelCase__ = size if size is not None else {"height": 224, "width": 224}
lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean
lowerCAmelCase__ = image_std
lowerCAmelCase__ = do_convert_rgb
def a ( self : Union[str, Any] ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Optional[Any]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCAmelCase__ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCAmelCase__ = []
for i in range(self.batch_size ):
lowerCAmelCase__ , lowerCAmelCase__ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCAmelCase__ = [torch.from_numpy(SCREAMING_SNAKE_CASE__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = ChineseCLIPImageProcessor if is_vision_available() else None
def a ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase__ = ChineseCLIPImageProcessingTester(self , do_center_crop=SCREAMING_SNAKE_CASE__ )
@property
def a ( self : int ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def a ( self : Tuple ) -> int:
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_convert_rgb" ) )
def a ( self : Dict ) -> int:
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def a ( self : str ) -> Optional[int]:
pass
def a ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def a ( self : Union[str, Any] ) -> Dict:
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def a ( self : Optional[Any] ) -> Any:
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = ChineseCLIPImageProcessor if is_vision_available() else None
def a ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase__ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = 3
@property
def a ( self : int ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def a ( self : List[str] ) -> Dict:
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_convert_rgb" ) )
def a ( self : Any ) -> Optional[int]:
pass
def a ( self : str ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 61 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : Tuple,__A : Optional[int],__A : Optional[int]=1_0_2_4,__A : int=1_0_2_4,__A : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 44 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
snake_case = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
snake_case = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
snake_case = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def _A ( self : List[Any] , UpperCAmelCase_ : List[List[List[str]]] , UpperCAmelCase_ : List[List[str]] , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCAmelCase_ , hypotheses=UpperCAmelCase_ , min_len=UpperCAmelCase_ , max_len=UpperCAmelCase_ )
}
| 62 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 44 | 0 |
a : Optional[int] = 9.8_06_65
def lowerCamelCase__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float = g ):
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 63 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : int = ""
else:
_lowerCamelCase : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[str] = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ViTConfig()
_lowerCamelCase : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
_lowerCamelCase : str = int(vit_name[-9:-6] )
else:
_lowerCamelCase : List[Any] = 1000
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Any = "imagenet-1k-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = int(vit_name[-6:-4] )
_lowerCamelCase : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
_lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
else:
_lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
if base_model:
_lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _lowerCamelCase ( unittest.TestCase ):
__a = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__a = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__a = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__a = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: str= ZeroShotClassificationPipeline(
model=lowerCAmelCase , tokenizer=lowerCAmelCase , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
# No kwarg
SCREAMING_SNAKE_CASE__: int= classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
SCREAMING_SNAKE_CASE__: List[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
SCREAMING_SNAKE_CASE__: Dict= classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
SCREAMING_SNAKE_CASE__: str= classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
lowerCAmelCase , [
{'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]}
for i in range(1 )
] , )
SCREAMING_SNAKE_CASE__: Tuple= classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
lowerCAmelCase , [
{'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(lowerCAmelCase ):
classifier(lowerCAmelCase , candidate_labels='''politics''' )
with self.assertRaises(lowerCAmelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(lowerCAmelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=lowerCAmelCase )
with self.assertRaises(lowerCAmelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(lowerCAmelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=lowerCAmelCase , )
self.run_entailment_id(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= zero_shot_classifier.model.config
SCREAMING_SNAKE_CASE__: str= config.labelaid
SCREAMING_SNAKE_CASE__: int= zero_shot_classifier.entailment_id
SCREAMING_SNAKE_CASE__: str= {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
SCREAMING_SNAKE_CASE__: Any= {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE__: List[Any]= {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE__: Tuple= {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
SCREAMING_SNAKE_CASE__: Dict= original_labelaid
self.assertEqual(lowerCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Union[str, Any]= pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[int]= pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
SCREAMING_SNAKE_CASE__: Any= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Tuple= pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
SCREAMING_SNAKE_CASE__: List[str]= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: int= pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
SCREAMING_SNAKE_CASE__: List[Any]= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
SCREAMING_SNAKE_CASE__: Dict= zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=lowerCAmelCase , )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Any= pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__: Tuple= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
SCREAMING_SNAKE_CASE__: Any= zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=lowerCAmelCase , )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 64 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A_ ( _lowerCAmelCase : int = 5000 ):
"""simple docstring"""
_lowerCamelCase : Dict = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCAmelCase )]
for i, pentagonal_i in enumerate(_lowerCAmelCase ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : List[Any] = pentagonal_nums[j]
_lowerCamelCase : Any = pentagonal_i + pentagonal_j
_lowerCamelCase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCAmelCase ) and is_pentagonal(_lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
"""simple docstring"""
__UpperCAmelCase = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__UpperCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = """Morse code here!"""
print(__UpperCamelCase )
UpperCAmelCase__ : List[str] = encrypt(__UpperCamelCase )
print(__UpperCamelCase )
UpperCAmelCase__ : int = decrypt(__UpperCamelCase )
print(__UpperCamelCase )
if __name__ == "__main__":
main()
| 65 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) <= 1:
return [tuple(SCREAMING_SNAKE_CASE )]
_lowercase : List[Any] = []
def generate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = [0] * n
res.append(tuple(SCREAMING_SNAKE_CASE ) )
_lowercase : int = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_lowercase , _lowercase : Optional[Any] = arr[i], arr[0]
else:
_lowercase , _lowercase : Union[str, Any] = arr[i], arr[c[i]]
res.append(tuple(SCREAMING_SNAKE_CASE ) )
c[i] += 1
_lowercase : Tuple = 0
else:
_lowercase : str = 0
i += 1
generate(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
return res
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 66 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : list[tuple[float, float]] ):
_lowerCamelCase : Tuple = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowerCamelCase : int = len(__A ) - 1
def lowerCamelCase_ ( self : Optional[int],__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree,__A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__A ),5 ) == 1
return output_values
def lowerCamelCase_ ( self : int,__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : List[Any] = self.basis_function(__A )
_lowerCamelCase : str = 0.0
_lowerCamelCase : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCamelCase_ ( self : Optional[Any],__A : float = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
_lowerCamelCase : list[float] = [] # x coordinates of points to plot
_lowerCamelCase : list[float] = [] # y coordinates of points to plot
_lowerCamelCase : Tuple = 0.0
while t <= 1:
_lowerCamelCase : str = self.bezier_curve_function(__A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_lowerCamelCase : List[str] = [i[0] for i in self.list_of_points]
_lowerCamelCase : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
__A,__A,color="blue",label="Curve of Degree " + str(self.degree ),)
plt.scatter(__A,__A,color="red",label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 0 |
class A_ :
"""simple docstring"""
def __init__( self : List[str] ) -> Any:
_lowercase = 0
_lowercase = 0
_lowercase = {}
def __UpperCAmelCase ( self : Union[str, Any] ,__A : Tuple ) -> Dict:
if vertex not in self.adjacency:
_lowercase = {}
self.num_vertices += 1
def __UpperCAmelCase ( self : Dict ,__A : List[Any] ,__A : Tuple ,__A : str ) -> Union[str, Any]:
self.add_vertex(__A )
self.add_vertex(__A )
if head == tail:
return
_lowercase = weight
_lowercase = weight
def __UpperCAmelCase ( self : List[str] ) -> str:
_lowercase = self.get_edges()
for edge in edges:
_lowercase , _lowercase , _lowercase = edge
edges.remove((tail, head, weight) )
for i in range(len(__A ) ):
_lowercase = list(edges[i] )
edges.sort(key=lambda __A : e[2] )
for i in range(len(__A ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_lowercase = edges[i][2] + 1
for edge in edges:
_lowercase , _lowercase , _lowercase = edge
_lowercase = weight
_lowercase = weight
def __str__( self : List[Any] ) -> Tuple:
_lowercase = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowercase = self.adjacency[head][tail]
string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def __UpperCAmelCase ( self : Any ) -> Dict:
_lowercase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
return self.adjacency.keys()
@staticmethod
def __UpperCAmelCase ( __A : str=None ,__A : Optional[int]=None ) -> int:
_lowercase = Graph()
if vertices is None:
_lowercase = []
if edges is None:
_lowercase = []
for vertex in vertices:
g.add_vertex(__A )
for edge in edges:
g.add_edge(*__A )
return g
class A_ :
"""simple docstring"""
def __init__( self : Any ) -> int:
_lowercase = {}
_lowercase = {}
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.parent )
def __UpperCAmelCase ( self : str ,__A : List[str] ) -> Any:
if item in self.parent:
return self.find(__A )
_lowercase = item
_lowercase = 0
return item
def __UpperCAmelCase ( self : List[Any] ,__A : Dict ) -> List[str]:
if item not in self.parent:
return self.make_set(__A )
if item != self.parent[item]:
_lowercase = self.find(self.parent[item] )
return self.parent[item]
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : List[Any] ) -> Any:
_lowercase = self.find(__A )
_lowercase = self.find(__A )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowercase = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowercase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowercase = roota
return roota
return None
@staticmethod
def __UpperCAmelCase ( __A : Union[str, Any] ) -> List[Any]:
_lowercase = graph.num_vertices
_lowercase = Graph.UnionFind()
_lowercase = []
while num_components > 1:
_lowercase = {}
for vertex in graph.get_vertices():
_lowercase = -1
_lowercase = graph.get_edges()
for edge in edges:
_lowercase , _lowercase , _lowercase = edge
edges.remove((tail, head, weight) )
for edge in edges:
_lowercase , _lowercase , _lowercase = edge
_lowercase = union_find.find(__A )
_lowercase = union_find.find(__A )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowercase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowercase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowercase , _lowercase , _lowercase = cheap_edge[vertex]
if union_find.find(__A ) != union_find.find(__A ):
union_find.union(__A ,__A )
mst_edges.append(cheap_edge[vertex] )
_lowercase = num_components - 1
_lowercase = Graph.build(edges=__A )
return mst
| 67 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44 | 0 |
def lowercase__ ( A_: str ) -> list:
"""simple docstring"""
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(A_ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("doctest").testmod()
| 68 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : list[int] , _UpperCAmelCase : int ) -> bool:
__snake_case = len(_UpperCAmelCase )
__snake_case = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__snake_case = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__snake_case = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__snake_case = subset[i - 1][j]
if arr[i - 1] <= j:
__snake_case = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class A:
'''simple docstring'''
def __init__( self : Tuple , A_ : int ) -> None:
"""simple docstring"""
lowerCamelCase_ = value
lowerCamelCase_ = None
lowerCamelCase_ = None
class A:
'''simple docstring'''
def __init__( self : List[str] , A_ : Node ) -> None:
"""simple docstring"""
lowerCamelCase_ = tree
def a__ ( self : Optional[int] , A_ : Node | None ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Tuple ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
| 44 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
self.builder.download_and_prepare(
download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
UpperCAmelCase_ : Dict = self.builder.as_dataset(
split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
UpperCAmelCase_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
return written
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
UpperCAmelCase_ : List[str] = query_table(
table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(_snake_case )
return written
| 71 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'xlm-roberta'
def __init__( self , snake_case_=3_05_22 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_=30_72 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
lowercase =vocab_size
lowercase =hidden_size
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =hidden_act
lowercase =intermediate_size
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =max_position_embeddings
lowercase =type_vocab_size
lowercase =initializer_range
lowercase =layer_norm_eps
lowercase =position_embedding_type
lowercase =use_cache
lowercase =classifier_dropout
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@property
def _A( self ):
if self.task == "multiple-choice":
lowercase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 72 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
| 44 | 0 |
from math import isqrt, loga
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = [True] * max_number
for i in range(2 , isqrt(max_number - 1) + 1):
if is_prime[i]:
for j in range(i**2 , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = False
return [i for i in range(2 , _UpperCAmelCase) if is_prime[i]]
def lowerCamelCase__ (_UpperCAmelCase = 80_0800 , _UpperCAmelCase = 80_0800):
SCREAMING_SNAKE_CASE = degree * loga(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = int(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = calculate_prime_numbers(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left])
+ prime_numbers[left] * loga(prime_numbers[right])
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 73 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
__SCREAMING_SNAKE_CASE : int = {
'''input_ids''': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
__SCREAMING_SNAKE_CASE : Any = model(_A )['''last_hidden_state''']
__SCREAMING_SNAKE_CASE : Dict = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _A )
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 74 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 75 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 0 |
"""simple docstring"""
a_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a_ = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
assert len(str(__UpperCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
__lowercase : Union[str, Any] = year // 1_00
__lowercase : Union[str, Any] = (5 * (century % 4) + 2) % 7
__lowercase : int = year % 1_00
__lowercase : Optional[int] = centurian % 12
__lowercase : str = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__lowercase : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
else DOOMSDAY_LEAP[month - 1]
)
__lowercase : int = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
A = """__DUMMY_TRANSFORMERS_USER__"""
A = """Dummy User"""
A = """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"""
A = """https://hub-ci.huggingface.co"""
A = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
A = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
A = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , UpperCamelCase )
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase ) -> Any:
"""simple docstring"""
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , UpperCamelCase )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , UpperCamelCase )
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase ) -> List[Any]:
"""simple docstring"""
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , UpperCamelCase )
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
HfFolder.save_token(UpperCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
return HfApi(endpoint=UpperCamelCase )
@pytest.fixture(scope="session" )
def _UpperCamelCase ( UpperCamelCase ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : int = HfFolder.get_token()
HfFolder.save_token(UpperCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCamelCase )
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
def _cleanup_repo(UpperCamelCase ):
hf_api.delete_repo(UpperCamelCase , token=UpperCamelCase , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase ) -> Dict:
"""simple docstring"""
@contextmanager
def _temporary_repo(UpperCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(UpperCamelCase )
return _temporary_repo
@pytest.fixture(scope="session" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
__UpperCAmelCase : str = f"repo_txt_data-{int(time.time() * 1_0e3 )}"
__UpperCAmelCase : List[Any] = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(UpperCamelCase , token=UpperCamelCase , repo_type="dataset" , private=UpperCamelCase )
hf_api.upload_file(
token=UpperCamelCase , path_or_fileobj=str(UpperCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=UpperCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase , token=UpperCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Dict = f"repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"
__UpperCAmelCase : Dict = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(UpperCamelCase , token=UpperCamelCase , repo_type="dataset" , private=UpperCamelCase )
hf_api.upload_file(
token=UpperCamelCase , path_or_fileobj=str(UpperCamelCase ) , path_in_repo="data.zip" , repo_id=UpperCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase , token=UpperCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Any = f"repo_zipped_img_data-{int(time.time() * 1_0e3 )}"
__UpperCAmelCase : List[Any] = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(UpperCamelCase , token=UpperCamelCase , repo_type="dataset" , private=UpperCamelCase )
hf_api.upload_file(
token=UpperCamelCase , path_or_fileobj=str(UpperCamelCase ) , path_in_repo="data.zip" , repo_id=UpperCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase , token=UpperCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 77 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether tp freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
'''simple docstring'''
UpperCAmelCase_ = []
if isinstance(snake_case_ , snake_case_ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]:
'''simple docstring'''
UpperCAmelCase_ = []
for d in reversed(snake_case_ ):
idx.append(flat_idx % d )
UpperCAmelCase_ = flat_idx // d
return tuple(reversed(snake_case_ ) )
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(snake_case_ : List[bool] ) -> None:
UpperCAmelCase_ = True
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase_ = l[reversed_idx]
if start_edges is None:
UpperCAmelCase_ = [s == 0 for s in start]
reduce_edge_list(snake_case_ )
if end_edges is None:
UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )]
reduce_edge_list(snake_case_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case_ ) == 0:
return [()]
elif len(snake_case_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case_ , snake_case_ ):
if s == e:
path_list.append(slice(snake_case_ , s + 1 ) )
else:
break
UpperCAmelCase_ = tuple(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
# start == end, and we're done
if divergence_idx == len(snake_case_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = start[divergence_idx]
return tuple(
path + (slice(snake_case_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = end[divergence_idx]
return tuple(
path + (slice(snake_case_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase_ = t.shape[:no_batch_dims]
UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) )
# Get an ordered list of slices to perform
UpperCAmelCase_ = _get_minimal_slice_set(
snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any:
'''simple docstring'''
if not (len(snake_case_ ) > 0):
raise ValueError("Must provide at least one input" )
UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )]
UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] )
def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ )
UpperCAmelCase_ = None
if _out is not None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCAmelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase_ = 0
UpperCAmelCase_ = prepped_outputs
for _ in range(snake_case_ ):
# Chunk the input
if not low_mem:
UpperCAmelCase_ = _select_chunk
else:
UpperCAmelCase_ = partial(
_chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , )
UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ )
# Run the layer on the chunk
UpperCAmelCase_ = layer(**snake_case_ )
# Allocate space for the output
if out is None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case_ , snake_case_ ):
def assign(snake_case_ : dict , snake_case_ : dict ) -> None:
for k, v in da.items():
if isinstance(snake_case_ , snake_case_ ):
assign(snake_case_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCAmelCase_ = da[k]
assign(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
for xa, xa in zip(snake_case_ , snake_case_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCAmelCase_ = xa
elif isinstance(snake_case_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ )
return out
class __A :
def __init__(self : Dict , __a : int = 512 , ):
UpperCAmelCase_ = max_chunk_size
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ):
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a : int ) -> bool:
try:
with torch.no_grad():
fn(*__a , chunk_size=__a )
return True
except RuntimeError:
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(__a ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase_ = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase_ = i
UpperCAmelCase_ = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase (self : int , __a : Iterable , __a : Iterable ):
UpperCAmelCase_ = True
for aa, aa in zip(__a , __a ):
assert type(__a ) == type(__a )
if isinstance(__a , (list, tuple) ):
consistent &= self._compare_arg_caches(__a , __a )
elif isinstance(__a , __a ):
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
consistent &= self._compare_arg_caches(__a , __a )
else:
consistent &= aa == aa
return consistent
def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# Otherwise, we can reuse the precomputed value
UpperCAmelCase_ = False
if not consistent:
UpperCAmelCase_ = self._determine_favorable_chunk_size(
__a , __a , __a , )
UpperCAmelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 78 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 0 |
SCREAMING_SNAKE_CASE__ : Dict = [0, 2, 4, 6, 8]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [1, 3, 5, 7, 9]
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
UpperCAmelCase__ : Optional[int] = 0
for digit in range(10 ):
UpperCAmelCase__ : Dict = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , __lowerCamelCase , __lowerCamelCase )
return result
UpperCAmelCase__ : Dict = 0
for digita in range(10 ):
UpperCAmelCase__ : Union[str, Any] = digita
if (remainder + digita) % 2 == 0:
UpperCAmelCase__ : Any = ODD_DIGITS
else:
UpperCAmelCase__ : Optional[Any] = EVEN_DIGITS
for digita in other_parity_digits:
UpperCAmelCase__ : Optional[Any] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , __lowerCamelCase , __lowerCamelCase , )
return result
def _lowerCamelCase ( __lowerCamelCase = 9 ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__lowerCamelCase , 0 , [0] * length , __lowerCamelCase )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 79 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
def snake_case ( lowerCamelCase = 1_000 ):
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case : Any = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Dict = ["ChineseCLIPFeatureExtractor"]
_snake_case : List[str] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
_snake_case : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(_lowerCAmelCase ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _lowerCAmelCase : [int(_lowerCAmelCase ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def lowercase__ ( self : str , _UpperCAmelCase : int=0 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor((1, 3, 128, 128) , rng=random.Random(_UpperCAmelCase ) )
UpperCAmelCase_ = torch.manual_seed(_UpperCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = ort.SessionOptions()
UpperCAmelCase_ = False
return options
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_ = init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCAmelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A fantasy landscape, trending on artstation"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_ = init_image.resize((128, 128) )
UpperCAmelCase_ = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
UpperCAmelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A fantasy landscape, trending on artstation"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 82 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 0 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
lowerCAmelCase__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowerCAmelCase__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case_ ( A_ : list[list[int]] ):
'''simple docstring'''
_lowerCamelCase : Any = []
for i in range(len(A_ ) ):
_lowerCamelCase : Optional[int] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_lowerCamelCase : int = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_lowerCamelCase : Tuple = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(A_ )
return next_generation
def snake_case_ ( A_ : list[list[int]], A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = []
for _ in range(A_ ):
# Create output image
_lowerCamelCase : Tuple = Image.new('''RGB''', (len(cells[0] ), len(A_ )) )
_lowerCamelCase : Any = img.load()
# Save cells to image
for x in range(len(A_ ) ):
for y in range(len(cells[0] ) ):
_lowerCamelCase : int = 2_55 - cells[y][x] * 2_55
_lowerCamelCase : Tuple = (colour, colour, colour)
# Save image
images.append(A_ )
_lowerCamelCase : Optional[Any] = new_generation(A_ )
return images
if __name__ == "__main__":
lowerCAmelCase__ = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 83 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : Tuple,__A : Optional[int],__A : Optional[int]=1_0_2_4,__A : int=1_0_2_4,__A : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 44 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
UpperCAmelCase = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
UpperCAmelCase = parser.parse_args()
if args.check_lib:
UpperCAmelCase = importlib.import_module('''transformers''')
UpperCAmelCase = Path(transformers_module.__file__).parent
else:
UpperCAmelCase = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 84 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 44 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__ : List[str] = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE__ : Any = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE__ : Tuple = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 85 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : int = ""
else:
_lowerCamelCase : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[str] = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ViTConfig()
_lowerCamelCase : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
_lowerCamelCase : str = int(vit_name[-9:-6] )
else:
_lowerCamelCase : List[Any] = 1000
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Any = "imagenet-1k-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = int(vit_name[-6:-4] )
_lowerCamelCase : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
_lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
else:
_lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
if base_model:
_lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Any=7 , UpperCAmelCase : Any=3 , UpperCAmelCase : str=18 , UpperCAmelCase : Union[str, Any]=30 , UpperCAmelCase : Union[str, Any]=400 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Dict=None , ):
A_ = size if size is not None else {"shortest_edge": 20}
A_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = do_center_crop
A_ = crop_size
def __A ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = MobileNetVaImageProcessor if is_vision_available() else None
def __A ( self : Optional[Any] ):
A_ = MobileNetVaImageProcessingTester(self )
@property
def __A ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : List[Any] ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "crop_size" ) )
def __A ( self : int ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __A ( self : int ):
pass
def __A ( self : List[str] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : str ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : str ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 86 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A_ ( _lowerCAmelCase : int = 5000 ):
"""simple docstring"""
_lowerCamelCase : Dict = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCAmelCase )]
for i, pentagonal_i in enumerate(_lowerCAmelCase ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : List[Any] = pentagonal_nums[j]
_lowerCamelCase : Any = pentagonal_i + pentagonal_j
_lowerCamelCase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCAmelCase ) and is_pentagonal(_lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase : Union[str, Any] = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase : Union[str, Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase : str = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Tuple) ->MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence'''),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence''') , id='''references'''),
}) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[List[List[str]]] , UpperCAmelCase__ : List[List[str]] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 4 , ) ->Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCAmelCase__ , hypotheses=UpperCAmelCase__ , min_len=UpperCAmelCase__ , max_len=UpperCAmelCase__)
}
| 87 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _snake_case ( __snake_case : List[str] ):
"""simple docstring"""
return getitem, k
def _snake_case ( __snake_case : List[Any] , __snake_case : int ):
"""simple docstring"""
return setitem, k, v
def _snake_case ( __snake_case : str ):
"""simple docstring"""
return delitem, k
def _snake_case ( __snake_case : int , __snake_case : Optional[Any] , *__snake_case : Any ):
"""simple docstring"""
try:
return fun(__snake_case , *__snake_case ), None
except Exception as e:
return None, e
UpperCAmelCase = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
UpperCAmelCase = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
UpperCAmelCase = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
UpperCAmelCase = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def _snake_case ( __snake_case : Any ):
"""simple docstring"""
_lowerCamelCase : str = HashMap(initial_block_size=4 )
_lowerCamelCase : List[Any] = {}
for _, (fun, *args) in enumerate(__snake_case ):
_lowerCamelCase , _lowerCamelCase : List[str] = _run_operation(__snake_case , __snake_case , *__snake_case )
_lowerCamelCase , _lowerCamelCase : Dict = _run_operation(__snake_case , __snake_case , *__snake_case )
assert my_res == py_res
assert str(__snake_case ) == str(__snake_case )
assert set(__snake_case ) == set(__snake_case )
assert len(__snake_case ) == len(__snake_case )
assert set(my.items() ) == set(py.items() )
def _snake_case ( ):
"""simple docstring"""
def is_public(__snake_case : str ) -> bool:
return not name.startswith("""_""" )
_lowerCamelCase : Any = {name for name in dir({} ) if is_public(__snake_case )}
_lowerCamelCase : Tuple = {name for name in dir(HashMap() ) if is_public(__snake_case )}
assert dict_public_names > hash_public_names
| 88 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : list[tuple[float, float]] ):
_lowerCamelCase : Tuple = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowerCamelCase : int = len(__A ) - 1
def lowerCamelCase_ ( self : Optional[int],__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree,__A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__A ),5 ) == 1
return output_values
def lowerCamelCase_ ( self : int,__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : List[Any] = self.basis_function(__A )
_lowerCamelCase : str = 0.0
_lowerCamelCase : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCamelCase_ ( self : Optional[Any],__A : float = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
_lowerCamelCase : list[float] = [] # x coordinates of points to plot
_lowerCamelCase : list[float] = [] # y coordinates of points to plot
_lowerCamelCase : Tuple = 0.0
while t <= 1:
_lowerCamelCase : str = self.bezier_curve_function(__A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_lowerCamelCase : List[str] = [i[0] for i in self.list_of_points]
_lowerCamelCase : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
__A,__A,color="blue",label="Curve of Degree " + str(self.degree ),)
plt.scatter(__A,__A,color="red",label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 0 |
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowerCamelCase_ , n - 1 , lowerCamelCase_ ) * a) % mod
else:
_lowercase : str = binary_exponentiation(lowerCamelCase_ , n / 2 , lowerCamelCase_ )
return (b * b) % mod
# a prime number
SCREAMING_SNAKE_CASE : str = 701
SCREAMING_SNAKE_CASE : Optional[int] = 1000000000
SCREAMING_SNAKE_CASE : Optional[int] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 89 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__UpperCAmelCase = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 90 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[str] = BertTokenizer
_lowerCamelCase: Optional[int] = BertTokenizerFast
_lowerCamelCase: Optional[Any] = True
_lowerCamelCase: int = True
_lowerCamelCase: str = filter_non_english
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
A = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ) -> str:
A = 'UNwant\u00E9d,running'
A = 'unwanted, running'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
A = self.tokenizer_class(self.vocab_file )
A = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,[9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = 'UNwant\u00E9d,running'
A = tokenizer.tokenize(A_ )
A = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
A = tokenizer.encode(A_ ,add_special_tokens=A_ )
A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
A = self.get_rust_tokenizer()
A = tokenizer.encode(A_ )
A = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ ,A_ )
# With lower casing
A = self.get_tokenizer(do_lower_case=A_ )
A = self.get_rust_tokenizer(do_lower_case=A_ )
A = 'UNwant\u00E9d,running'
A = tokenizer.tokenize(A_ )
A = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
A = tokenizer.encode(A_ ,add_special_tokens=A_ )
A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
A = self.get_rust_tokenizer()
A = tokenizer.encode(A_ )
A = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
A = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
A = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
A = BasicTokenizer(do_lower_case=A_ ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = BasicTokenizer()
A = 'a\n\'ll !!to?\'d of, can\'t.'
A = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A = {}
for i, token in enumerate(A_ ):
A = i
A = WordpieceTokenizer(vocab=A_ ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.tokenizer_class.from_pretrained('bert-base-uncased' )
A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ )
A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
A = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A = tokenizer_r.encode_plus(
A_ ,return_attention_mask=A_ ,return_token_type_ids=A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ,)
A = tokenizer_r.do_lower_case if hasattr(A_ ,'do_lower_case' ) else False
A = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
A = ['的', '人', '有']
A = ''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = True
A = self.tokenizer_class.from_pretrained(A_ ,**A_ )
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
A = tokenizer_p.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_r.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_r.convert_ids_to_tokens(A_ )
A = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ ,A_ )
self.assertListEqual(A_ ,A_ )
A = False
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
A = self.tokenizer_class.from_pretrained(A_ ,**A_ )
A = tokenizer_r.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_p.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_r.convert_ids_to_tokens(A_ )
A = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
A = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ ,A_ )
self.assertListEqual(A_ ,A_ )
| 91 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[int] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 92 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
| 44 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ :int = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
lowerCAmelCase__ :int = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase__ :Optional[Any] = len(__UpperCAmelCase )
self.assertGreater(__UpperCAmelCase , 0 )
self.assertEqual(
__UpperCAmelCase , [
{
'score': ANY(__UpperCAmelCase ),
'label': ANY(__UpperCAmelCase ),
'box': {'xmin': ANY(__UpperCAmelCase ), 'ymin': ANY(__UpperCAmelCase ), 'xmax': ANY(__UpperCAmelCase ), 'ymax': ANY(__UpperCAmelCase )},
}
for i in range(__UpperCAmelCase )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def snake_case ( self ):
'''simple docstring'''
pass
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
lowerCAmelCase__ :str = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
] , )
lowerCAmelCase__ :Any = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
]
] , )
@require_torch
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = pipeline('zero-shot-object-detection' )
lowerCAmelCase__ :str = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
] , )
lowerCAmelCase__ :Any = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def snake_case ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = 0.2
lowerCAmelCase__ :Optional[Any] = pipeline('zero-shot-object-detection' )
lowerCAmelCase__ :Optional[Any] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
] , )
@require_torch
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = 2
lowerCAmelCase__ :Tuple = pipeline('zero-shot-object-detection' )
lowerCAmelCase__ :str = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
] , )
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase : Dict =ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase : Optional[Any] =1_2_8
elif "12-12" in model_name:
lowercase : Union[str, Any] =1_2
lowercase : Tuple =1_2
elif "14-14" in model_name:
lowercase : Optional[Any] =1_4
lowercase : List[str] =1_4
elif "16-16" in model_name:
lowercase : Union[str, Any] =1_6
lowercase : Optional[int] =1_6
else:
raise ValueError('''Model not supported''' )
lowercase : List[Any] ='''huggingface/label-files'''
if "speech-commands" in model_name:
lowercase : str =3_5
lowercase : Union[str, Any] ='''speech-commands-v2-id2label.json'''
else:
lowercase : Optional[int] =5_2_7
lowercase : str ='''audioset-id2label.json'''
lowercase : List[str] =json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
lowercase : Tuple ={int(__A ): v for k, v in idalabel.items()}
lowercase : Dict =idalabel
lowercase : List[str] ={v: k for k, v in idalabel.items()}
return config
def lowercase_ ( __A : Dict ) -> List[str]:
"""simple docstring"""
if "module.v" in name:
lowercase : int =name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
lowercase : Tuple =name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
lowercase : int =name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
lowercase : Optional[int] =name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowercase : Optional[Any] =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
lowercase : List[Any] =name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowercase : int =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase : Dict =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase : List[Any] =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase : List[str] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase : int =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase : str =name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase : Optional[int] =name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
lowercase : Any =name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
lowercase : Dict =name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def lowercase_ ( __A : str , __A : List[str] ) -> Tuple:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase : Dict =orig_state_dict.pop(__A )
if "qkv" in key:
lowercase : Union[str, Any] =key.split('''.''' )
lowercase : Any =int(key_split[3] )
lowercase : int =config.hidden_size
if "weight" in key:
lowercase : str =val[:dim, :]
lowercase : int =val[dim : dim * 2, :]
lowercase : Tuple =val[-dim:, :]
else:
lowercase : Dict =val[:dim]
lowercase : str =val[dim : dim * 2]
lowercase : Optional[int] =val[-dim:]
else:
lowercase : List[Any] =val
return orig_state_dict
def lowercase_ ( __A : Dict ) -> List[str]:
"""simple docstring"""
lowercase : str =[
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(__A , __A )
@torch.no_grad()
def lowercase_ ( __A : str , __A : Optional[Any] , __A : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any =get_audio_spectrogram_transformer_config(__A )
lowercase : List[Any] ={
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
lowercase : Dict =model_name_to_url[model_name]
lowercase : Optional[Any] =torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' )
# remove some keys
remove_keys(__A )
# rename some keys
lowercase : Optional[Any] =convert_state_dict(__A , __A )
# load 🤗 model
lowercase : str =ASTForAudioClassification(__A )
model.eval()
model.load_state_dict(__A )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase : List[Any] =-4.2677393 if '''speech-commands''' not in model_name else -6.845978
lowercase : Union[str, Any] =4.5689974 if '''speech-commands''' not in model_name else 5.5654526
lowercase : Optional[int] =1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8
lowercase : List[Any] =ASTFeatureExtractor(mean=__A , std=__A , max_length=__A )
if "speech-commands" in model_name:
lowercase : Tuple =load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
lowercase : str =dataset[0]['''audio''']['''array''']
else:
lowercase : int =hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
lowercase , lowercase : Optional[int] =torchaudio.load(__A )
lowercase : Any =waveform.squeeze().numpy()
lowercase : Optional[int] =feature_extractor(__A , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' )
# forward pass
lowercase : Optional[int] =model(**__A )
lowercase : List[Any] =outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase : Union[str, Any] =torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase : str =torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase : Dict =torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase : List[str] =torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase : str =torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase : List[Any] =torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase : Any =torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase : Optional[Any] =torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , __A , atol=1E-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(__A )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 94 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
| 44 | 0 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase_ = 8.988E9 # units = N * m^s * C^-2
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : str = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if distance < 0:
raise ValueError("Distance cannot be negative" )
if force == 0:
UpperCAmelCase_ : Any = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
UpperCAmelCase_ : Optional[Any] = abs(A__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
UpperCAmelCase_ : Any = abs(A__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
UpperCAmelCase_ : Tuple = (COULOMBS_CONSTANT * charge_product / abs(A__ )) ** 0.5
return {"distance": distance}
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowerCamelCase = 'pt'
elif is_tf_available():
__lowerCamelCase = 'tf'
else:
__lowerCamelCase = 'jax'
class __A ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = ByTaTokenizer
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
super().setUp()
__magic_name__: Union[str, Any] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase__ ( self : str , **__snake_case : List[str] ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : List[str]=False , __snake_case : str=2_0 , __snake_case : Dict=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__magic_name__: List[Any] = []
for i in range(len(__snake_case ) ):
try:
__magic_name__: Any = tokenizer.decode([i] , clean_up_tokenization_spaces=__snake_case )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__magic_name__: List[Any] = list(filter(lambda __snake_case : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __snake_case ) )
__magic_name__: Union[str, Any] = list(filter(lambda __snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__snake_case ) , __snake_case ) )
if max_length is not None and len(__snake_case ) > max_length:
__magic_name__: Dict = toks[:max_length]
if min_length is not None and len(__snake_case ) < min_length and len(__snake_case ) > 0:
while len(__snake_case ) < min_length:
__magic_name__: int = toks + toks
# toks_str = [t[1] for t in toks]
__magic_name__: str = [t[0] for t in toks]
# Ensure consistency
__magic_name__: int = tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case )
if " " not in output_txt and len(__snake_case ) > 1:
__magic_name__: List[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__snake_case )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__snake_case )
)
if with_prefix_space:
__magic_name__: Any = """ """ + output_txt
__magic_name__: List[Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
return output_txt, output_ids
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
__magic_name__: int = self.ta_base_tokenizer
__magic_name__: List[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
__magic_name__: List[Any] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: Any = self.ta_base_tokenizer
__magic_name__: str = """Unicode €."""
__magic_name__: Any = tokenizer(__snake_case )
__magic_name__: Any = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded["""input_ids"""] , __snake_case )
# decoding
__magic_name__: Optional[Any] = tokenizer.decode(__snake_case )
self.assertEqual(__snake_case , """Unicode €.</s>""" )
__magic_name__: str = tokenizer("""e è é ê ë""" )
__magic_name__: List[str] = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded["""input_ids"""] , __snake_case )
# decoding
__magic_name__: Optional[int] = tokenizer.decode(__snake_case )
self.assertEqual(__snake_case , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowerCamelCase__ ( self : str ) -> List[Any]:
__magic_name__: Optional[int] = self.ta_base_tokenizer
__magic_name__: Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
__magic_name__: Any = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
__magic_name__: Dict = tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
if FRAMEWORK != "jax":
__magic_name__: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
__magic_name__: List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__snake_case , __snake_case )
self.assertEqual((2, 3_7) , batch.input_ids.shape )
self.assertEqual((2, 3_7) , batch.attention_mask.shape )
def lowerCamelCase__ ( self : List[str] ) -> int:
__magic_name__: Tuple = self.ta_base_tokenizer
__magic_name__: Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__magic_name__: Tuple = tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , __snake_case )
self.assertIn("""attention_mask""" , __snake_case )
self.assertNotIn("""decoder_input_ids""" , __snake_case )
self.assertNotIn("""decoder_attention_mask""" , __snake_case )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
__magic_name__: Optional[int] = self.ta_base_tokenizer
__magic_name__: Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
__magic_name__: List[str] = tokenizer(
text_target=__snake_case , max_length=3_2 , padding="""max_length""" , truncation=__snake_case , return_tensors=__snake_case )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self : List[str] ) -> int:
__magic_name__: str = self.ta_base_tokenizer
__magic_name__: Union[str, Any] = ["""A long paragraph for summarization. </s>"""]
__magic_name__: Any = ["""Summary of the text. </s>"""]
# fmt: off
__magic_name__: int = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
__magic_name__: List[str] = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
__magic_name__: Optional[int] = tokenizer(__snake_case , text_target=__snake_case )
self.assertEqual(__snake_case , batch["""input_ids"""][0] )
self.assertEqual(__snake_case , batch["""labels"""][0] )
def lowerCamelCase__ ( self : Dict ) -> str:
# safety check on max_len default value so we are sure the test works
__magic_name__: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__magic_name__: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__: Optional[Any] = tempfile.mkdtemp()
__magic_name__: str = """ He is very happy, UNwant\u00E9d,running"""
__magic_name__: Dict = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__magic_name__: int = tokenizer.__class__.from_pretrained(__snake_case )
__magic_name__: Any = after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
shutil.rmtree(__snake_case )
__magic_name__: Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__: Optional[int] = tempfile.mkdtemp()
__magic_name__: str = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
__magic_name__: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__magic_name__: List[Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__magic_name__: List[str] = tokenizer.__class__.from_pretrained(__snake_case )
__magic_name__: Any = after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__magic_name__: Optional[int] = tokenizer.__class__.from_pretrained(__snake_case , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__snake_case )
def lowerCamelCase__ ( self : str ) -> int:
__magic_name__: Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__: Any = json.load(__snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__: Any = json.load(__snake_case )
__magic_name__: Union[str, Any] = [F'<extra_id_{i}>' for i in range(1_2_5 )]
__magic_name__: List[str] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
__magic_name__: List[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__: Dict = tokenizer_class.from_pretrained(
__snake_case , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__: Optional[int] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__snake_case )]
__magic_name__: List[Any] = tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
__magic_name__: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case )
__magic_name__: Dict = tokenizer_class.from_pretrained(__snake_case )
self.assertTrue(tokenizer.decode([2_5_5] ) == """""" )
def lowerCamelCase__ ( self : str ) -> str:
pass
def lowerCamelCase__ ( self : str ) -> Tuple:
pass
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
pass
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__magic_name__: Tuple = self.get_tokenizers(fast=__snake_case , do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__: Any = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
__magic_name__: Optional[Any] = tokenizer.convert_tokens_to_string(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> Any:
__magic_name__: Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__: str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__magic_name__: List[Any] = 0
__magic_name__: Optional[int] = tokenizer.convert_ids_to_tokens(
__snake_case , skip_special_tokens=__snake_case )
for attr in attributes_list:
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [] )
setattr(__snake_case , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 96 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowercase__( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ) -> Optional[Any]:
super().__init__()
lowercase_ = nn.Linear(3 , 4 )
lowercase_ = nn.BatchNormad(4 )
lowercase_ = nn.Linear(4 , 5 )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE_ ) ) )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
return output + 1
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[Any] ) -> Dict:
lowercase_ = ModelForTest()
lowercase_ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE_ )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(SCREAMING_SNAKE_CASE_ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '''_hf_hook''' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '''_old_forward''' ) )
def _lowercase ( self : int ) -> Tuple:
lowercase_ = ModelForTest()
lowercase_ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , append=SCREAMING_SNAKE_CASE_ )
self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(SCREAMING_SNAKE_CASE_ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '''_hf_hook''' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '''_old_forward''' ) )
def _lowercase ( self : List[Any] ) -> int:
lowercase_ = ModelForTest()
lowercase_ = torch.randn(2 , 3 )
lowercase_ = test_model(x + 1 )
lowercase_ = test_model(x + 2 )
lowercase_ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase_ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase_ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-5 )
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ = ModelForTest()
lowercase_ = torch.randn(2 , 3 )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
lowercase_ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase_ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase_ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , output + 2 , atol=1e-5 )
def _lowercase ( self : List[str] ) -> Optional[Any]:
lowercase_ = ModelForTest()
lowercase_ = torch.randn(2 , 3 )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
lowercase_ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowercase_ = True
lowercase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowercase_ = torch.randn(2 , 3 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(SCREAMING_SNAKE_CASE_ , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE_ ) )
lowercase_ = torch.randn(2 , 3 ).to(0 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowercase ( self : List[str] ) -> Dict:
lowercase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase_ = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.randn(2 , 3 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
lowercase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase_ = torch.randn(2 , 3 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def _lowercase ( self : Optional[Any] ) -> List[str]:
lowercase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase_ = torch.device(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.randn(2 , 3 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ , offload_buffers=SCREAMING_SNAKE_CASE_ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase_ = torch.randn(2 , 3 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def _lowercase ( self : Dict ) -> Any:
lowercase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase_ = torch.device(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.randn(2 , 3 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE_ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase_ = torch.randn(2 , 3 )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 97 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 0 |
'''simple docstring'''
lowercase__ : Optional[int] = 2_56
# Modulus to hash a string
lowercase__ : Any = 1_00_00_03
def a__ ( lowercase : str, lowercase : str ) -> bool:
"""simple docstring"""
_UpperCamelCase = len(lowercase )
_UpperCamelCase = len(lowercase )
if p_len > t_len:
return False
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase ):
_UpperCamelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCamelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCamelCase = (modulus_power * alphabet_size) % modulus
for i in range(0, t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_UpperCamelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def a__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase = '''abc1abc12'''
_UpperCamelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
_UpperCamelCase = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase, lowercase ) and not rabin_karp(lowercase, lowercase )
# Test 2)
_UpperCamelCase = '''ABABX'''
_UpperCamelCase = '''ABABZABABYABABX'''
assert rabin_karp(lowercase, lowercase )
# Test 3)
_UpperCamelCase = '''AAAB'''
_UpperCamelCase = '''ABAAAAAB'''
assert rabin_karp(lowercase, lowercase )
# Test 4)
_UpperCamelCase = '''abcdabcy'''
_UpperCamelCase = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase, lowercase )
# Test 5)
_UpperCamelCase = '''Lü'''
_UpperCamelCase = '''Lüsai'''
assert rabin_karp(lowercase, lowercase )
_UpperCamelCase = '''Lue'''
assert not rabin_karp(lowercase, lowercase )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 98 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = """pegasus"""
_lowerCamelCase = ["""past_key_values"""]
_lowerCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __A=50265 , __A=1024 , __A=12 , __A=4096 , __A=16 , __A=12 , __A=4096 , __A=16 , __A=0.0 , __A=0.0 , __A=True , __A=True , __A="gelu" , __A=1024 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.02 , __A=0 , __A=False , __A=0 , __A=1 , __A=1 , **__A , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
@property
def snake_case_ ( self ):
return self.encoder_attention_heads
@property
def snake_case_ ( self ):
return self.d_model
| 99 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether tp freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = DanceDiffusionPipeline
lowerCamelCase__ : List[str] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowerCamelCase__ : Any = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
lowerCamelCase__ : List[str] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Dict = False
def lowercase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=A_ , use_timestep_embedding=A_ , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
SCREAMING_SNAKE_CASE__ = IPNDMScheduler()
SCREAMING_SNAKE_CASE__ = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def lowercase_ ( self , A_ , A_=0 ):
'''simple docstring'''
if str(A_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(A_ )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(A_ )
SCREAMING_SNAKE_CASE__ = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = DanceDiffusionPipeline(**A_ )
SCREAMING_SNAKE_CASE__ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(A_ )
SCREAMING_SNAKE_CASE__ = pipe(**A_ )
SCREAMING_SNAKE_CASE__ = output.audios
SCREAMING_SNAKE_CASE__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
SCREAMING_SNAKE_CASE__ = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowercase_ ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def lowercase_ ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def lowercase_ ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowercase_ ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def lowercase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = torch_device
SCREAMING_SNAKE_CASE__ = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
SCREAMING_SNAKE_CASE__ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
SCREAMING_SNAKE_CASE__ = output.audios
SCREAMING_SNAKE_CASE__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
SCREAMING_SNAKE_CASE__ = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = torch_device
SCREAMING_SNAKE_CASE__ = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
SCREAMING_SNAKE_CASE__ = output.audios
SCREAMING_SNAKE_CASE__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
SCREAMING_SNAKE_CASE__ = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 100 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected', [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 1_0, 'max_num_jobs': 1}, [range(1_0 )]),
({'num_shards': 1_0, 'max_num_jobs': 1_0}, [range(A__, i + 1 ) for i in range(1_0 )]),
({'num_shards': 1, 'max_num_jobs': 1_0}, [range(1 )]),
({'num_shards': 1_0, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 1_0 )]),
({'num_shards': 3, 'max_num_jobs': 1_0}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def a__ ( A__, A__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = _distribute_shards(**A__ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected', [
({'foo': 0}, 1_0, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
], )
def a__ ( A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : List[str] = _split_gen_kwargs(A__, A__ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected', [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
], )
def a__ ( A__, A__ ):
if expected is RuntimeError:
with pytest.raises(A__ ):
_number_of_shards_in_gen_kwargs(A__ )
else:
SCREAMING_SNAKE_CASE_ : str = _number_of_shards_in_gen_kwargs(A__ )
assert out == expected
| 101 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = len(SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = len(matrix[0] )
UpperCamelCase : str = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for row in range(SCREAMING_SNAKE_CASE ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = matrix[col][row] / matrix[row][row]
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase : Union[str, Any] = True
for i in range(row + 1 , SCREAMING_SNAKE_CASE ):
if matrix[i][row] != 0:
UpperCamelCase , UpperCamelCase : Union[str, Any] = matrix[i], matrix[row]
UpperCamelCase : Any = False
break
if reduce:
rank -= 1
for i in range(SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : str=1_8 , __lowerCamelCase : Any=3_0 , __lowerCamelCase : Any=4_0_0 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCamelCase : List[Any]=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCamelCase : int=True , ):
"""simple docstring"""
_snake_case = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_snake_case = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = min_resolution
_snake_case = max_resolution
_snake_case = do_resize
_snake_case = size
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_normalize
_snake_case = image_mean
_snake_case = image_std
_snake_case = do_convert_rgb
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[str]=False , __lowerCamelCase : int=False , __lowerCamelCase : Any=False ):
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_snake_case = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_snake_case = []
for i in range(self.batch_size ):
_snake_case , _snake_case = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_snake_case = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
_snake_case = [torch.from_numpy(__lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = ChineseCLIPImageProcessingTester(self , do_center_crop=__lowerCamelCase )
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 2_2_4, '''width''': 2_2_4} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : int = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__lowerCamelCase )
_snake_case = 3
@property
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 103 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(_lowerCAmelCase ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _lowerCAmelCase : [int(_lowerCAmelCase ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : List[Any] = "facebook/bart-large-mnli"
A__ : int = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
A__ : Union[str, Any] = "text_classifier"
A__ : int = AutoTokenizer
A__ : str = AutoModelForSequenceClassification
A__ : Union[str, Any] = ["text", ["text"]]
A__ : int = ["text"]
def snake_case__ ( self ) -> Tuple:
super().setup()
A__ = self.model.config
A__ = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
A__ = int(SCREAMING_SNAKE_CASE__ )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
A__ = labels
return self.pre_processor(
[text] * len(SCREAMING_SNAKE_CASE__ ) , [f"""This example is {label}""" for label in labels] , return_tensors="pt" , padding="max_length" , )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Dict:
A__ = outputs.logits
A__ = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 104 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : complex , lowerCamelCase_ : str = "x" , lowerCamelCase_ : float = 10**-10 , lowerCamelCase_ : int = 1 , ) -> complex:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = symbols(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = lambdify(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = lambdify(lowerCamelCase_ , diff(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : Dict = starting_point
while True:
if diff_function(lowerCamelCase_ ) != 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = prev_guess - multiplicity * func(lowerCamelCase_ ) / diff_function(
lowerCamelCase_ )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE_ : str = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 105 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : Tuple,__A : Optional[int],__A : Optional[int]=1_0_2_4,__A : int=1_0_2_4,__A : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 44 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any]=13 , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple=99 , __UpperCamelCase : Tuple=32 , __UpperCamelCase : int=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : List[str]=512 , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : Any=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : str=4 , ) -> Union[str, Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_attention_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_choices
def __UpperCamelCase ( self : Optional[int] ) -> Any:
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_attention_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__UpperCamelCase , )
return config, input_ids, attention_mask
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
A_ : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
A = FlaxDistilBertModelTester(self )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
for model_class_name in self.all_model_classes:
A = model_class_name.from_pretrained('distilbert-base-uncased' )
A = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
A = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
A = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
A = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
A = (1, 11, 768)
self.assertEqual(output.shape , __UpperCamelCase )
A = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1e-4 ) )
| 106 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 44 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
__lowerCAmelCase = ["image_processor", "tokenizer"]
__lowerCAmelCase = "FlavaImageProcessor"
__lowerCAmelCase = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : int, UpperCamelCase__ : str=None, UpperCamelCase__ : Any=None, **UpperCamelCase__ : List[Any] ) -> str:
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', UpperCamelCase__, )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase__, UpperCamelCase__ )
_A = self.image_processor
def __call__( self : Union[str, Any], UpperCamelCase__ : Optional[ImageInput] = None, UpperCamelCase__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, UpperCamelCase__ : bool = True, UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False, UpperCamelCase__ : Union[bool, str, TruncationStrategy] = False, UpperCamelCase__ : Optional[int] = None, UpperCamelCase__ : int = 0, UpperCamelCase__ : Optional[int] = None, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : bool = False, UpperCamelCase__ : bool = False, UpperCamelCase__ : bool = False, UpperCamelCase__ : bool = False, UpperCamelCase__ : bool = True, UpperCamelCase__ : Optional[Union[str, TensorType]] = None, **UpperCamelCase__ : Dict, ) -> List[str]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_A = self.tokenizer(
text=UpperCamelCase__, add_special_tokens=UpperCamelCase__, padding=UpperCamelCase__, truncation=UpperCamelCase__, max_length=UpperCamelCase__, stride=UpperCamelCase__, pad_to_multiple_of=UpperCamelCase__, return_token_type_ids=UpperCamelCase__, return_attention_mask=UpperCamelCase__, return_overflowing_tokens=UpperCamelCase__, return_special_tokens_mask=UpperCamelCase__, return_offsets_mapping=UpperCamelCase__, return_length=UpperCamelCase__, verbose=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__, )
if images is not None:
_A = self.image_processor(
UpperCamelCase__, return_image_mask=UpperCamelCase__, return_codebook_pixels=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__, )
if text is not None and images is not None:
encoding.update(UpperCamelCase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ), tensor_type=UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any], *UpperCamelCase__ : int, **UpperCamelCase__ : Tuple ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCamelCase__, **UpperCamelCase__ )
def __UpperCAmelCase ( self : Any, *UpperCamelCase__ : Dict, **UpperCamelCase__ : Tuple ) -> Optional[Any]:
return self.tokenizer.decode(*UpperCamelCase__, **UpperCamelCase__ )
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', UpperCamelCase__, )
return self.image_processor_class
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', UpperCamelCase__, )
return self.image_processor
| 107 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : int = ""
else:
_lowerCamelCase : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[str] = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ViTConfig()
_lowerCamelCase : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
_lowerCamelCase : str = int(vit_name[-9:-6] )
else:
_lowerCamelCase : List[Any] = 1000
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Any = "imagenet-1k-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = int(vit_name[-6:-4] )
_lowerCamelCase : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
_lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
else:
_lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
if base_model:
_lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 0 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = name
_UpperCAmelCase = val
def __str__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Optional[int] , lowerCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.val < other.val
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : int , lowerCamelCase : Optional[int] ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
_UpperCAmelCase = self.build_heap(lowerCamelCase )
def __getitem__( self : Any , lowerCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.get_value(lowerCamelCase )
def lowerCamelCase ( self : int , lowerCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
return (idx - 1) // 2
def lowerCamelCase ( self : int , lowerCamelCase : Optional[int] ) -> int:
"""simple docstring"""
return idx * 2 + 1
def lowerCamelCase ( self : int , lowerCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return idx * 2 + 2
def lowerCamelCase ( self : Dict , lowerCamelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return self.heap_dict[key]
def lowerCamelCase ( self : Any , lowerCamelCase : Any ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = len(lowerCamelCase ) - 1
_UpperCAmelCase = self.get_parent_idx(lowerCamelCase )
for idx, i in enumerate(lowerCamelCase ):
_UpperCAmelCase = idx
_UpperCAmelCase = i.val
for i in range(lowerCamelCase , -1 , -1 ):
self.sift_down(lowerCamelCase , lowerCamelCase )
return array
def lowerCamelCase ( self : str , lowerCamelCase : List[str] , lowerCamelCase : int ) -> List[str]:
"""simple docstring"""
while True:
_UpperCAmelCase = self.get_left_child_idx(lowerCamelCase ) # noqa: E741
_UpperCAmelCase = self.get_right_child_idx(lowerCamelCase )
_UpperCAmelCase = idx
if l < len(lowerCamelCase ) and array[l] < array[idx]:
_UpperCAmelCase = l
if r < len(lowerCamelCase ) and array[r] < array[smallest]:
_UpperCAmelCase = r
if smallest != idx:
_UpperCAmelCase , _UpperCAmelCase = array[smallest], array[idx]
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_UpperCAmelCase = smallest
else:
break
def lowerCamelCase ( self : Tuple , lowerCamelCase : Optional[int] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.get_parent_idx(lowerCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
_UpperCAmelCase , _UpperCAmelCase = self.heap[idx], self.heap[p]
_UpperCAmelCase , _UpperCAmelCase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_UpperCAmelCase = p
_UpperCAmelCase = self.get_parent_idx(lowerCamelCase )
def lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return self.heap[0]
def lowerCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.heap[-1], self.heap[0]
_UpperCAmelCase , _UpperCAmelCase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_UpperCAmelCase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
self.heap.append(lowerCamelCase )
_UpperCAmelCase = len(self.heap ) - 1
_UpperCAmelCase = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.heap ) == 0
def lowerCamelCase ( self : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] ) -> List[str]:
"""simple docstring"""
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_UpperCAmelCase = new_value
_UpperCAmelCase = new_value
self.sift_up(self.idx_of_element[node] )
__a: str = Node('''R''', -1)
__a: List[Any] = Node('''B''', 6)
__a: List[str] = Node('''A''', 3)
__a: Optional[Any] = Node('''X''', 1)
__a: Tuple = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__a: List[Any] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A_ ( _lowerCAmelCase : int = 5000 ):
"""simple docstring"""
_lowerCamelCase : Dict = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCAmelCase )]
for i, pentagonal_i in enumerate(_lowerCAmelCase ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : List[Any] = pentagonal_nums[j]
_lowerCamelCase : Any = pentagonal_i + pentagonal_j
_lowerCamelCase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCAmelCase ) and is_pentagonal(_lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , __UpperCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
assert _test_patching.open is open
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , __UpperCAmelCase ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def __magic_name__ ( ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , __UpperCAmelCase ):
pass
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , __UpperCAmelCase ) is None
with patch_submodule(_test_patching , """len""" , __UpperCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_start_and_stop_mock__"""
__SCREAMING_SNAKE_CASE = patch_submodule(_test_patching , """open""" , __UpperCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_successive_join__"""
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_successive_dirname__"""
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , __UpperCAmelCase ):
with patch_submodule(_test_patching , """os.rename""" , __UpperCAmelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , __UpperCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , __UpperCAmelCase ):
with patch_submodule(_test_patching , """os.path.join""" , __UpperCAmelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , __UpperCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , __UpperCAmelCase ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , __UpperCAmelCase ):
pass
| 109 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class a ( unittest.TestCase ):
def __init__( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = parent
def __snake_case ( self ):
return {}
def lowerCamelCase ( ):
UpperCAmelCase__ : Dict = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
UpperCAmelCase__ : Optional[int] = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class a ( lowercase , unittest.TestCase ):
UpperCamelCase : str = MarkupLMFeatureExtractor if is_bsa_available() else None
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = MarkupLMFeatureExtractionTester(self )
@property
def __snake_case ( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def __snake_case ( self ):
# Initialize feature_extractor
UpperCAmelCase__ : Any = self.feature_extraction_class()
# Test not batched input
UpperCAmelCase__ : Tuple = get_html_strings()[0]
UpperCAmelCase__ : Optional[Any] = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
UpperCAmelCase__ : int = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ )
# Test batched
UpperCAmelCase__ : List[str] = get_html_strings()
UpperCAmelCase__ : List[Any] = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = expected_nodes + [['My First Heading', 'My first paragraph.']]
UpperCAmelCase__ : Dict = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ )
| 110 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : list[tuple[float, float]] ):
_lowerCamelCase : Tuple = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowerCamelCase : int = len(__A ) - 1
def lowerCamelCase_ ( self : Optional[int],__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree,__A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__A ),5 ) == 1
return output_values
def lowerCamelCase_ ( self : int,__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : List[Any] = self.basis_function(__A )
_lowerCamelCase : str = 0.0
_lowerCamelCase : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCamelCase_ ( self : Optional[Any],__A : float = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
_lowerCamelCase : list[float] = [] # x coordinates of points to plot
_lowerCamelCase : list[float] = [] # y coordinates of points to plot
_lowerCamelCase : Tuple = 0.0
while t <= 1:
_lowerCamelCase : str = self.bezier_curve_function(__A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_lowerCamelCase : List[str] = [i[0] for i in self.list_of_points]
_lowerCamelCase : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
__A,__A,color="blue",label="Curve of Degree " + str(self.degree ),)
plt.scatter(__A,__A,color="red",label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> int:
if gpta_config_file == "":
UpperCamelCase = GPTaConfig()
else:
UpperCamelCase = GPTaConfig.from_json_file(_lowerCAmelCase )
UpperCamelCase = GPTaModel(_lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
UpperCamelCase = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCamelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , _lowerCAmelCase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
_snake_case = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 282 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44 | 0 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
A__ : Union[str, Any] = True
from torch.cuda.amp import autocast
A__ : Tuple = logging.getLogger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=None ) -> Optional[Any]:
return field(default_factory=lambda: default , metadata=_lowerCAmelCase )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : List[Any] = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase : Optional[Any] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase : Dict = field(
default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowerCamelCase : Tuple = field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
lowerCamelCase : Optional[int] = field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
lowerCamelCase : Optional[Any] = field(
default=0.1 , metadata={
'help': 'The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
lowerCamelCase : Optional[Any] = field(
default=0.1 , metadata={'help': 'The dropout probabilitiy for all 1D convolutional layers in feature extractor.'} , )
lowerCamelCase : Union[str, Any] = field(
default=0.05 , metadata={
'help': (
'Propability of each feature vector along the time axis to be chosen as the start of the vector'
'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'
'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
lowerCamelCase : int = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : List[Any] = field(
default=_UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase : str = field(
default='train+validation' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase : Optional[Any] = field(
default=_UpperCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCamelCase : str = field(
default=_UpperCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase : Any = field(
default=_UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase : Optional[Any] = field(
default=_UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
lowerCamelCase : List[Any] = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str = 4_2
lowerCamelCase : Optional[int] = True
lowerCamelCase : Tuple = None
lowerCamelCase : Any = None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = None
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# split inputs and labels since they have to be of different lenghts and need
# different padding methods
__lowerCamelCase : Optional[Any] = [{"input_values": feature["input_values"]} for feature in features]
__lowerCamelCase : List[str] = [{"input_ids": feature["labels"]} for feature in features]
__lowerCamelCase : Union[str, Any] = self.processor.pad(
__A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__lowerCamelCase : Union[str, Any] = self.processor.pad(
labels=__A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
__lowerCamelCase : Dict = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
__lowerCamelCase : Optional[Any] = labels
return batch
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
model.train()
__lowerCamelCase : Dict = self._prepare_inputs(__A )
if self.use_amp:
with autocast():
__lowerCamelCase : Union[str, Any] = self.compute_loss(__A , __A )
else:
__lowerCamelCase : Union[str, Any] = self.compute_loss(__A , __A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__lowerCamelCase : Optional[int] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowerCamelCase : Union[str, Any] = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
__lowerCamelCase : int = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__A ).backward()
elif self.use_apex:
with amp.scale_loss(__A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__A )
else:
loss.backward()
return loss.detach()
def UpperCAmelCase__ ( ) -> Any:
__lowerCamelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase : Dict = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCamelCase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCamelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__lowerCamelCase : List[str] = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
__lowerCamelCase : Any = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
__lowerCamelCase : List[str] = F'[{"".join(data_args.chars_to_ignore )}]'
def remove_special_characters(UpperCAmelCase_ : Optional[int] ):
__lowerCamelCase : List[str] = re.sub(_lowerCAmelCase , '' , batch['sentence'] ).lower() + " "
return batch
__lowerCamelCase : int = train_dataset.map(_lowerCAmelCase , remove_columns=['sentence'] )
__lowerCamelCase : Optional[int] = eval_dataset.map(_lowerCAmelCase , remove_columns=['sentence'] )
def extract_all_chars(UpperCAmelCase_ : Optional[Any] ):
__lowerCamelCase : List[Any] = " ".join(batch['text'] )
__lowerCamelCase : List[Any] = list(set(_lowerCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__lowerCamelCase : List[str] = train_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , batch_size=-1 , keep_in_memory=_lowerCAmelCase , remove_columns=train_dataset.column_names , )
__lowerCamelCase : List[str] = train_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , batch_size=-1 , keep_in_memory=_lowerCAmelCase , remove_columns=eval_dataset.column_names , )
__lowerCamelCase : Dict = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
__lowerCamelCase : Any = {v: k for k, v in enumerate(_lowerCAmelCase )}
__lowerCamelCase : str = vocab_dict[" "]
del vocab_dict[" "]
__lowerCamelCase : List[str] = len(_lowerCAmelCase )
__lowerCamelCase : Optional[int] = len(_lowerCAmelCase )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase : str = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
__lowerCamelCase : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase )
__lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__lowerCamelCase : List[Any] = min(len(_lowerCAmelCase ) , data_args.max_train_samples )
__lowerCamelCase : Any = train_dataset.select(range(_lowerCAmelCase ) )
if data_args.max_val_samples is not None:
__lowerCamelCase : str = eval_dataset.select(range(data_args.max_val_samples ) )
__lowerCamelCase : Any = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(UpperCAmelCase_ : Optional[int] ):
__lowerCamelCase : Dict = torchaudio.load(batch['path'] )
__lowerCamelCase : Dict = resampler(_lowerCAmelCase ).squeeze().numpy()
__lowerCamelCase : List[str] = 1_60_00
__lowerCamelCase : str = batch["text"]
return batch
__lowerCamelCase : Optional[Any] = train_dataset.map(
_lowerCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__lowerCamelCase : Optional[int] = eval_dataset.map(
_lowerCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(UpperCAmelCase_ : List[str] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
__lowerCamelCase : str = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(_lowerCAmelCase )
return batch
__lowerCamelCase : Tuple = train_dataset.map(
_lowerCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
__lowerCamelCase : Optional[int] = eval_dataset.map(
_lowerCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
__lowerCamelCase : Optional[Any] = datasets.load_metric('wer' )
def compute_metrics(UpperCAmelCase_ : int ):
__lowerCamelCase : Any = pred.predictions
__lowerCamelCase : str = np.argmax(_lowerCAmelCase , axis=-1 )
__lowerCamelCase : Tuple = processor.tokenizer.pad_token_id
__lowerCamelCase : str = processor.batch_decode(_lowerCAmelCase )
# we do not want to group tokens when computing the metrics
__lowerCamelCase : Optional[int] = processor.batch_decode(pred.label_ids , group_tokens=_lowerCAmelCase )
__lowerCamelCase : str = wer_metric.compute(predictions=_lowerCAmelCase , references=_lowerCAmelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__lowerCamelCase : Tuple = DataCollatorCTCWithPadding(processor=_lowerCAmelCase , padding=_lowerCAmelCase )
# Initialize our Trainer
__lowerCamelCase : Any = CTCTrainer(
model=_lowerCAmelCase , data_collator=_lowerCAmelCase , args=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCamelCase : str = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__lowerCamelCase : Optional[int] = model_args.model_name_or_path
else:
__lowerCamelCase : Optional[int] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__lowerCamelCase : Optional[int] = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model()
__lowerCamelCase : Union[str, Any] = train_result.metrics
__lowerCamelCase : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
__lowerCamelCase : List[str] = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics('train' , _lowerCAmelCase )
trainer.save_metrics('train' , _lowerCAmelCase )
trainer.save_state()
# Evaluation
__lowerCamelCase : Optional[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCamelCase : Optional[Any] = trainer.evaluate()
__lowerCamelCase : Optional[int] = data_args.max_val_samples if data_args.max_val_samples is not None else len(_lowerCAmelCase )
__lowerCamelCase : List[str] = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics('eval' , _lowerCAmelCase )
trainer.save_metrics('eval' , _lowerCAmelCase )
return results
if __name__ == "__main__":
main()
| 13 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Any=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ :int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ :str = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_lowerCAmelCase )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ :Dict = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_lowerCAmelCase )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ :Dict = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_lowerCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _SCREAMING_SNAKE_CASE:
def __init__( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple=13 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Optional[int]=99 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : Optional[int]="relu" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : List[Any]=20 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : Tuple=0 , ) -> str:
SCREAMING_SNAKE_CASE__ :Any = parent
SCREAMING_SNAKE_CASE__ :Dict = batch_size
SCREAMING_SNAKE_CASE__ :int = seq_length
SCREAMING_SNAKE_CASE__ :Tuple = is_training
SCREAMING_SNAKE_CASE__ :Tuple = use_labels
SCREAMING_SNAKE_CASE__ :Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ :Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ :Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE__ :Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ :List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ :str = hidden_act
SCREAMING_SNAKE_CASE__ :List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ :int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ :Any = encoder_layerdrop
SCREAMING_SNAKE_CASE__ :Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ :str = max_position_embeddings
SCREAMING_SNAKE_CASE__ :int = eos_token_id
SCREAMING_SNAKE_CASE__ :Optional[Any] = pad_token_id
SCREAMING_SNAKE_CASE__ :List[Any] = bos_token_id
def __lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ :List[str] = self.eos_token_id # Eos Token
SCREAMING_SNAKE_CASE__ :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE__ :Tuple = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ :Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ :Any = self.get_config()
SCREAMING_SNAKE_CASE__ :Tuple = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def __lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __lowerCamelCase ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :str = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int ) -> Dict:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
SCREAMING_SNAKE_CASE__ :int = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = inputs_dict["attention_mask"]
SCREAMING_SNAKE_CASE__ :List[str] = inputs_dict["head_mask"]
# first forward pass
SCREAMING_SNAKE_CASE__ :List[Any] = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
SCREAMING_SNAKE_CASE__ :int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ :int = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ :str = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ :List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ :Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
SCREAMING_SNAKE_CASE__ :Any = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
SCREAMING_SNAKE_CASE__ :str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ :Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def __lowerCamelCase ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ :Tuple = MaMaaaModel(config=__A ).to(__A ).eval()
SCREAMING_SNAKE_CASE__ :List[str] = model(**__A )
SCREAMING_SNAKE_CASE__ :Dict = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ :Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ :Optional[int] = model.get_encoder()
encoder.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ :Dict = MaMaaaEncoder.from_pretrained(__A ).to(__A )
SCREAMING_SNAKE_CASE__ :Any = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ :Tuple = model.get_decoder()
decoder.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ :List[str] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
SCREAMING_SNAKE_CASE__ :Tuple = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ : List[str] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
A_ : int = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
A_ : Union[str, Any] = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
A_ : Tuple = True
A_ : List[str] = True
A_ : Dict = False
A_ : Tuple = False
def __lowerCamelCase ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str ) -> Union[str, Any]:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __lowerCamelCase ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :str = MaMaaaModelTester(self )
SCREAMING_SNAKE_CASE__ :Optional[Any] = ConfigTester(self , config_class=__A )
def __lowerCamelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ :Optional[int] = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ :Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info['missing_keys'] , [] )
def __lowerCamelCase ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def __lowerCamelCase ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def __lowerCamelCase ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE__ :int = model_class(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ :List[str] = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ :List[str] = inputs["input_ids"]
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE__ :Tuple = inputs["input_ids"]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = inputs.get('decoder_input_ids' , __A )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , __A )
SCREAMING_SNAKE_CASE__ :Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ :List[Any] = wte(__A )
else:
SCREAMING_SNAKE_CASE__ :List[str] = wte(__A )
SCREAMING_SNAKE_CASE__ :Dict = wte(__A )
with torch.no_grad():
model(**__A )[0]
def __lowerCamelCase ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ :int = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = input_ids.ne(1 ).to(__A )
SCREAMING_SNAKE_CASE__ :Any = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> int:
'''simple docstring'''
return torch.tensor(_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase )
UpperCamelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self : Optional[int] ) -> Dict:
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def __lowerCamelCase ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ :Any = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(__A )
SCREAMING_SNAKE_CASE__ :Optional[int] = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
SCREAMING_SNAKE_CASE__ :List[Any] = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
SCREAMING_SNAKE_CASE__ :List[Any] = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :Any = model(**__A )[0]
SCREAMING_SNAKE_CASE__ :List[Any] = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , __A )
# change to expected output here
SCREAMING_SNAKE_CASE__ :Union[str, Any] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def __lowerCamelCase ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ :int = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__A )
# change to intended input
SCREAMING_SNAKE_CASE__ :Optional[Any] = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
SCREAMING_SNAKE_CASE__ :List[str] = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
SCREAMING_SNAKE_CASE__ :List[Any] = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :Any = model(**__A )[0]
SCREAMING_SNAKE_CASE__ :str = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
SCREAMING_SNAKE_CASE__ :str = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def __lowerCamelCase ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ :List[str] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__A )
SCREAMING_SNAKE_CASE__ :Any = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
SCREAMING_SNAKE_CASE__ :str = tokenizer(__A , padding=__A , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ :Tuple = model.generate(
input_ids=dct['input_ids'].to(__A ) , attention_mask=dct['attention_mask'].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
SCREAMING_SNAKE_CASE__ :str = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
SCREAMING_SNAKE_CASE__ :str = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 209 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 0 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
__A : Dict = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase ( ):
"""simple docstring"""
A__ : str =os.path.dirname(os.path.realpath(_lowerCAmelCase ) )
A__ : Tuple =os.path.join(_lowerCAmelCase , "words.txt" )
A__ : Dict =""
with open(_lowerCAmelCase ) as f:
A__ : Optional[int] =f.readline()
A__ : Optional[Any] =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
A__ : Optional[int] =[
word
for word in [sum(ord(_lowerCAmelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 656 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
| 44 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : int , lowerCAmelCase__ : int) -> str:
'''simple docstring'''
while b:
_lowercase : Optional[Any] = b, a % b
return a
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : int , lowerCAmelCase__ : int) -> str:
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(_lowerCAmelCase , a % b)
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
'''simple docstring'''
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5)}''')
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3)}''')
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3)}''')
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6)}''')
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3)}''')
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5)}''')
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3)}''')
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3)}''')
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6)}''')
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3)}''')
if __name__ == "__main__":
main()
| 125 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 0 |
def A__ ( lowerCamelCase = 50 ) -> Optional[Any]:
UpperCamelCase_: Optional[int] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 548 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
| 44 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : Optional[Any] = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
snake_case__ : Dict = field(
default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
snake_case__ : Any = field(
default=lowercase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
snake_case__ : Optional[Any] = field(
default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
snake_case__ : Dict = field(default=lowercase__ , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
snake_case__ : List[Any] = field(default=lowercase__ , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : Any = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ : Optional[int] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
snake_case__ : str = field(
default=1_024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ : Optional[Any] = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ : Dict = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
snake_case__ : Optional[Any] = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ : List[str] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
snake_case__ : str = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
snake_case__ : List[str] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
snake_case__ : List[str] = field(default=lowercase__ , metadata={'''help''': '''Source language id for translation.'''} )
snake_case__ : int = field(default=lowercase__ , metadata={'''help''': '''Target language id for translation.'''} )
snake_case__ : Optional[int] = field(default=lowercase__ , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
snake_case__ : str = field(
default=lowercase__ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : List[str] , __A : Any ) -> List[Any]:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
"""simple docstring"""
a_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F"""({config.__class__.__name__}) doesn\'t have a `{p}` attribute"""
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
a_ : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
a_ : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
a_ : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
a_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
a_ : int = SeqaSeqDataset
# Get datasets
a_ : Tuple = (
dataset_class(
_lowerCAmelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
a_ : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
a_ : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
a_ : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
a_ : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
a_ : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
a_ : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
a_ : int = train_result.metrics
a_ : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a_ : Optional[Any] = trainer.evaluate(metric_key_prefix='val' )
a_ : Dict = data_args.n_val
a_ : List[Any] = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
a_ : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix='test' )
a_ : Dict = test_output.metrics
a_ : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
a_ : int = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
a_ : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
a_ : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 570 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
def _A ( lowerCamelCase , lowerCamelCase=False ):
a__ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a__ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
a__ : Any = ""
else:
a__ : List[Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a__ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
a__ : int = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
a__ : Any = in_proj_bias[: config.hidden_size]
a__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a__ : str = in_proj_weight[
-config.hidden_size :, :
]
a__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def _A ( lowerCamelCase ):
a__ : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : str = dct.pop(_lowerCAmelCase )
a__ : str = val
def _A ( ):
a__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
a__ : int = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True ):
a__ : Dict = ViTConfig()
# patch_size
if model_name[-1] == "8":
a__ : Dict = 8
# set labels if required
if not base_model:
a__ : Tuple = 1000
a__ : List[str] = "huggingface/label-files"
a__ : Optional[int] = "imagenet-1k-id2label.json"
a__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
a__ : Tuple = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
a__ : Dict = idalabel
a__ : List[str] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
a__ : Any = 384
a__ : Union[str, Any] = 1536
a__ : Union[str, Any] = 12
a__ : Optional[int] = 6
# load original model from torch hub
a__ : Optional[int] = torch.hub.load("facebookresearch/dino:main" , _lowerCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
a__ : Dict = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
a__ : Dict = create_rename_keys(_lowerCAmelCase , base_model=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if base_model:
a__ : Optional[int] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase ).eval()
else:
a__ : Optional[int] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
a__ : Optional[int] = ViTImageProcessor()
a__ : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
a__ : Optional[Any] = encoding["pixel_values"]
a__ : Any = model(_lowerCAmelCase )
if base_model:
a__ : Optional[int] = original_model(_lowerCAmelCase )
assert torch.allclose(_lowerCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
a__ : List[str] = original_model(_lowerCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 112 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 596 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 262 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.