| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
"""Find the minimum-cost path from the top-left to the bottom-right corner of a
cost matrix, moving only rightwards or downwards."""
from __future__ import annotations


def minimum_cost_path(matrix: list[list[int]]) -> int:
    """
    Relax the path costs in place and return the minimum total cost.

    >>> minimum_cost_path([[2, 1], [3, 1], [4, 2]])
    6
    >>> minimum_cost_path([[2, 1, 4], [2, 1, 3], [3, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
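A quick sanity check beyond the doctests (the grid values are illustrative; the expected cost is hand-computed): in the classic 3x3 grid below, the cheapest monotone path is 1 -> 3 -> 1 -> 1 -> 1, for a total of 7. Note that the function relaxes costs in place, so pass a copy if the original grid must survive.

grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert minimum_cost_path([row[:] for row in grid]) == 7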
---
"""Mappings between the dense 14-atom and sparse 37-atom protein representations (adapted from OpenFold)."""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch) -> Dict[str, np.ndarray]:
    """Numpy wrapper around `make_atom14_masks`."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
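A minimal usage sketch of the first function above (the ten-residue `aatype` tensor is a made-up input; everything else comes from the module's own `residue_constants` tables):

import torch

protein = {"aatype": torch.zeros(10, dtype=torch.long)}  # ten residues, all of restype 0
protein = make_atom14_masks(protein)
print(protein["residx_atom14_to_atom37"].shape)  # torch.Size([10, 14])
print(protein["atom37_atom_exists"].shape)       # torch.Size([10, 37])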
---
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : list ):
if len(UpperCamelCase__ ) <= 1:
return [tuple(UpperCamelCase__ )]
_UpperCAmelCase : Dict = []
def generate(UpperCamelCase__ : int , UpperCamelCase__ : list ):
_UpperCAmelCase : int = [0] * n
res.append(tuple(UpperCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_UpperCAmelCase : Optional[Any] = arr[i], arr[0]
else:
_UpperCAmelCase : int = arr[i], arr[c[i]]
res.append(tuple(UpperCamelCase__ ) )
c[i] += 1
_UpperCAmelCase : int = 0
else:
_UpperCAmelCase : Dict = 0
i += 1
generate(len(UpperCamelCase__ ) , UpperCamelCase__ )
return res
if __name__ == "__main__":
_lowerCAmelCase :str = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase :List[str] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
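For example, hand-tracing the loop above on a three-element list gives the six permutations in the characteristic Heap order, each produced from the previous one by a single swap:

print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]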
---
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the CUAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\n    \'aupr\': Area Under the Precision-Recall curve\n    \'prec_at_80_recall\': Precision at 80% recall\n    \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> cuad_metric = datasets.load_metric("cuad")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
---
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
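A minimal sketch of exercising the ONNX config above (assumes the two classes are importable from transformers as shown; `TensorType` is the module's own import):

from transformers import AutoTokenizer

config = LongformerConfig()
onnx_config = LongformerOnnxConfig(config)
tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
print(sorted(dummy))  # ['attention_mask', 'global_attention_mask', 'input_ids']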
---
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
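A short usage sketch (mirrors the NLLB-200 model card; the target language is forced at generation time rather than encoded in the input):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
inputs = tokenizer("Hello world", return_tensors="pt")
# With a seq2seq model loaded, translation to French would then look like:
# model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))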
---
"""Monte Carlo estimation of pi and of areas under curves."""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator on y = x, whose integral is known in closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
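For reproducible experiments it can help to thread an explicit RNG through the estimator. A small sketch of that idea (a seeded variant that returns the estimate instead of printing it; this helper is not part of the original module):

import random


def pi_estimate(iterations: int, seed: int = 0) -> float:
    rng = random.Random(seed)
    hits = sum(
        rng.uniform(-1.0, 1.0) ** 2 + rng.uniform(-1.0, 1.0) ** 2 <= 1.0
        for _ in range(iterations)
    )
    return 4.0 * hits / iterations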
---

"""Convert Swin Transformer checkpoints from the timm library to the Hugging Face format."""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Split the fused timm qkv projection into separate query/key/value tensors.
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
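Run from Python rather than the command line, the conversion amounts to a single call (the dump folder here is a hypothetical path):

convert_swin_checkpoint("swin_tiny_patch4_window7_224", "./swin-tiny-patch4-window7-224")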
---
"""Testing suite for the PyTorch Falcon model."""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_cache_conversions(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have a different number of KV-heads than query heads, so this
        # overrides the common check with the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )


@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
---
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
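A small sketch of the derived `d_head` invariant enforced in `__init__` above:

config = XLNetConfig(d_model=1024, n_head=16)
assert config.d_head == 64  # always d_model // n_head
# XLNetConfig(d_model=1024, n_head=15) would raise ValueError, since 1024 % 15 != 0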
---
"""Import structure for the RoBERTa-PreLayerNorm model."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
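The `_LazyModule` wiring above defers the heavy framework imports until an attribute is first accessed. A stripped-down illustration of the idea (this is not the real transformers implementation, which also handles `dir()`, submodule access, and error reporting):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        return getattr(module, attr)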
---
"""Entry point for the diffusers-cli tool."""
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
---
""" Recall metric. """
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
__snake_case = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    """Recall metric, backed by `sklearn.metrics.recall_score`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        # sklearn expects (y_true, y_pred), i.e. references first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score} | 97 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is matched and properly nested."""
    stack: list[str] = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for char in s:
        if char in open_brackets:
            stack.append(char)
        elif char in closed_brackets and (
            len(stack) == 0 or open_to_closed[stack.pop()] != char
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main() | 6 | 0 |
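
# --- Added sketch (not in the original snippet): quick hand-verified checks of the
# `is_balanced` helper defined above.
assert is_balanced("([]{})")    # properly nested pairs
assert is_balanced("")          # an empty string is trivially balanced
assert not is_balanced("([)]")  # interleaved pairs are rejected
assert not is_balanced("(()")   # an unclosed bracket leaves the stack non-empty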
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multi_gpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multi_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) | 353 |
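
# --- Added sketch (not part of the original test file): the shape of the metrics dict
# the assertions above rely on. `metrics.json` must carry a "test" list whose first
# entry has a "test_avg_em" score; any other keys it may contain are unspecified here.
stub_result = {"test": [{"test_avg_em": 0.25}]}
assert stub_result["test"][0]["test_avg_em"] >= 0.2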
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the main __init__ and extract the backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object: a constant, a function or a class depending on the name's casing."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date; with `overwrite=True`, rewrite them with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 222 | 0 |
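
# --- Added sketch (not part of the original script): what `create_dummy_object` (defined
# above) emits for each of its three branches, using made-up object names. The generated
# code imports `DummyObject` / `requires_backends` from `diffusers.utils`, per the header.
print(create_dummy_object("SOME_CONSTANT", '["torch"]'))
# -> "\nSOME_CONSTANT = None\n"                              (uppercase -> constant)
print(create_dummy_object("some_function", '["torch"]'))
# -> a stub function that raises via requires_backends       (lowercase -> function)
print(create_dummy_object("SomePipeline", '["torch"]'))
# -> a DummyObject class whose __init__/from_config/from_pretrained call requires_backends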
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        expected_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, expected_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 85 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 | 1 |
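
# --- Added sketch (not in the original snippet): the markdown formatter above only
# needs `title` and `url` keys on each story dict, so it can be exercised offline
# with stub data instead of live API responses.
stub_stories = [
    {"title": "Show HN: A tiny example", "url": "https://example.com/a"},
    {"title": "Another story", "url": "https://example.com/b"},
]
markdown = "\n".join("* [{title}]({url})".format(**story) for story in stub_stories)
assert markdown.splitlines()[0] == "* [Show HN: A tiny example](https://example.com/a)"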
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main() | 240 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # assumed value; the original literal was obfuscated
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # `local_files_only=True` is an assumption; the original boolean was obfuscated.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_save_load_optional_components(self):
        # intentionally a no-op for this pipeline; the original method name was obfuscated
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # dtype default is an assumption; the original literal was obfuscated (`torch.floataa`)
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3 | 240 | 1 |
def solution(limit: int = 28_123) -> int:
    """
    Project Euler 23: sum of all positive integers that cannot be written as
    the sum of two abundant numbers (a number is abundant when the sum of its
    proper divisors exceeds the number itself).
    """
    # sum_divs[n] accumulates the sum of the proper divisors of n, built with a
    # divisor-pair sieve: every factor pair (i, k) with i * k = n contributes i + k once.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
| 24 |
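
# --- Added sketch (not in the original snippet): an independent brute-force check of
# `solution` (defined above) for a small limit, straight from the definition.
def brute_force(limit: int) -> int:
    def proper_divisor_sum(n: int) -> int:
        return sum(i for i in range(1, n) if n % i == 0)

    abundants = [n for n in range(1, limit + 1) if proper_divisor_sum(n) > n]
    sums = {a + b for a in abundants for b in abundants if a + b <= limit}
    return sum(n for n in range(1, limit + 1) if n not in sums)

assert solution(100) == brute_force(100)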
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,  # assumed value; the original literal was obfuscated
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 24 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 5_1_2,
'''google/realm-cc-news-pretrained-encoder''': 5_1_2,
'''google/realm-cc-news-pretrained-scorer''': 5_1_2,
'''google/realm-cc-news-pretrained-openqa''': 5_1_2,
'''google/realm-orqa-nq-openqa''': 5_1_2,
'''google/realm-orqa-nq-reader''': 5_1_2,
'''google/realm-orqa-wq-openqa''': 5_1_2,
'''google/realm-orqa-wq-reader''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        r"""Encode a batch of text or text pairs; every candidate row is padded to `max_length`."""
        # Always use a fixed-length padding strategy so the candidate rows line up.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 195 |
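
# --- Added sketch (not part of the original file): the token-type pattern produced by
# `create_token_type_ids_from_sequences`, computed by hand for a pair of sequences.
# With token_ids_0 of length 3 and token_ids_1 of length 2, the layout is
# [CLS] A A A [SEP] B B [SEP]  ->  0 0 0 0 0 1 1 1
token_ids_0 = [11, 12, 13]
token_ids_1 = [21, 22]
expected = [0] * (1 + len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 1)
assert expected == [0, 0, 0, 0, 0, 1, 1, 1]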
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 195 | 1 |
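
# --- Added sketch (not part of the original pipeline): the classifier-free guidance
# update used in the denoising loop above, shown on tiny tensors. With
# guidance_scale = 1 the text prediction is returned unchanged.
import torch

noise_pred_uncond = torch.tensor([0.0, 1.0])
noise_pred_text = torch.tensor([1.0, 3.0])
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(guided, torch.tensor([7.5, 16.0]))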
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # assumed value; the original literal was obfuscated
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
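# The integration test below runs the released hybrid MiDaS checkpoint end to end
# and compares a 3x3 corner of the predicted depth map against reference values.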
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 26 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
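# Each test below builds a local SparkSession, so no cluster is required; row ids
# produced by the Spark builder follow the "<partition_id>_<row_idx>" scheme that
# the helper above reconstructs for comparison.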
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 180 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
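# NewModelConfig/TFNewModel are dummy fixtures used by test_new_model_registration
# below to exercise the AutoConfig/TFAuto* register API.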
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
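    # The next test registers the dummy NewModelConfig/TFNewModel pair with every
    # TFAuto* class, checks the error paths, and cleans the registries up afterwards.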
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
| 155 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
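# For example, power(2, 10) recurses down to power(2, 0) == 1 and returns 1024.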
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
lowercase__ : Optional[Any] = int(input('''Enter the base: ''').strip())
lowercase__ : int = int(input('''Enter the exponent: ''').strip())
lowercase__ : int = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
lowercase__ : Any = 1 / result
print(f'{base} to the power of {exponent} is {result}')
| 155 | 1 |
"""simple docstring"""
from math import factorial
A_ = {str(d): factorial(d) for d in range(10)}
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) )
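# e.g. sum_of_digit_factorial(145) == 1! + 4! + 5! == 145, so 145 is counted by solution() below.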
def solution() -> int:
    """Sum all qualifying numbers below 7 * 9! + 1; past that bound the digit factorials can no longer reach the number itself."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 64 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
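# The original MSN checkpoint stores query/key/value as one fused qkv matrix per
# block; the helper below splits it into the three separate projections that the
# HF ViT implementation expects.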
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
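# Full conversion: build the config for the requested variant, load and clean the
# torchhub checkpoint, remap its keys, then sanity-check the outputs on a COCO
# image before saving.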
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 64 | 1 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))

                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
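    # forward() returns -log p(labels | hidden) per position when labels are given,
    # and the full log-softmax over the vocabulary otherwise. Tokens are bucketed by
    # self.cutoffs: the head cluster is scored directly, tail clusters through a
    # cluster token in the head softmax plus a per-cluster tail softmax.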
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
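# Minimal usage sketch (illustrative values, not part of the original file): with a
# 10k-token vocab and cutoffs [2000, 6000], forward() returns one NLL per position.
#
#   crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=64, d_proj=64, cutoffs=[2000, 6000], div_val=2)
#   hidden = torch.randn(4, 16, 64)           # (batch, seq_len, d_proj)
#   labels = torch.randint(0, 10000, (4, 16))
#   nll = crit(hidden, labels)                # shape: (4 * 15,) after the one-step shift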
| 194 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")
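# e.g. is_9_pandigital(918273645) is True: it uses each digit 1-9 exactly once.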
def solution() -> int | None:
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 194 | 1 |
'''Tests for the Stable Diffusion latent upscale pipeline.'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
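# The fast tests below build a tiny UNet/VAE/CLIP stack so the full pipeline runs
# on CPU in seconds; enable_full_determinism() above keeps the reference slices
# reproducible.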
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_equivalence(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        output_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - output_image).max()) < 5e-2
| 250 |
'''Compute attention-head importance for GPT-2 and optionally mask or prune the least useful heads.'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
_snake_case = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
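# e.g. entropy(torch.tensor([0.5, 0.5])) == ln 2 ~= 0.6931, while a one-hot
# distribution gives 0 (the p == 0 terms are zeroed out above).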
def print_2d_tensor(tensor):
    """Log a 2D tensor row by row (one row per layer)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute the attention entropy per head and the head importance scores (gradient of the loss w.r.t. the head mask)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
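# mask_heads iteratively zeroes out the least-important heads (masking_amount per
# step) until the LM score drops below masking_threshold * the original score.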
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
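# prune_heads actually removes the masked head weights from the model and compares
# parameter count, LM score and timing before vs. after pruning.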
def prune_heads(args, model, eval_dataloader, head_mask):
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [v]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_lowercase : Any = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
_lowercase : Optional[int] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_lowercase : List[Any] = torch.device("cuda" , args.local_rank )
_lowercase : Dict = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_lowercase : List[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_lowercase : str = nn.parallel.DistributedDataParallel(
snake_case , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case )
elif args.n_gpu > 1:
_lowercase : Dict = nn.DataParallel(snake_case )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=snake_case )
torch.save(snake_case , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , snake_case )
# Prepare dataset
_lowercase : Optional[Any] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_lowercase : List[str] = (torch.from_numpy(snake_case ),)
_lowercase : Dict = TensorDataset(*snake_case )
_lowercase : List[Any] = RandomSampler(snake_case )
_lowercase : str = DataLoader(snake_case , sampler=snake_case , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case , snake_case , snake_case )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_lowercase : int = mask_heads(snake_case , snake_case , snake_case )
prune_heads(snake_case , snake_case , snake_case , snake_case )
if __name__ == "__main__":
main()
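# Example invocation (the script file name is illustrative; the flags are the ones
# defined by the argument parser above):
#   python run_bertology.py --model_name_or_path gpt2 --data_dir data.txt \
#       --output_dir out --try_masking --masking_threshold 0.9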
| 250 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
__UpperCAmelCase = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
__UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Any = VOCAB_FILES_NAMES
UpperCAmelCase_ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :str = ["input_ids", "attention_mask"]
UpperCAmelCase_ :List[str] = NllbTokenizer
UpperCAmelCase_ :List[int] = []
UpperCAmelCase_ :List[int] = []
def __init__( self , __A=None , __A=None , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=None , __A=None , __A=None , __A=False , **__A , ) -> Optional[Any]:
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCAmelCase_ :str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
lowerCAmelCase_ :List[Any] = legacy_behaviour
super().__init__(
vocab_file=__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , legacy_behaviour=__A , **__A , )
lowerCAmelCase_ :Any = vocab_file
lowerCAmelCase_ :Dict = False if not self.vocab_file else True
lowerCAmelCase_ :List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
lowerCAmelCase_ :List[str] = {
lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase_ :int = src_lang if src_lang is not None else """eng_Latn"""
lowerCAmelCase_ :Any = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase_ :str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ) -> str:
return self._src_lang
@src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
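    # For a single sequence this yields `[src_lang_code] token_ids [eos]` under the
    # default (non-legacy) behaviour configured in `set_src_lang_special_tokens` below.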
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :Tuple = [self.sep_token_id]
lowerCAmelCase_ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , __A , __A , __A , __A , **__A ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowerCAmelCase_ :Tuple = src_lang
lowerCAmelCase_ :Union[str, Any] = self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
lowerCAmelCase_ :Dict = self.convert_tokens_to_ids(__A )
lowerCAmelCase_ :Optional[int] = tgt_lang_id
return inputs
def __lowerCAmelCase ( self , __A , __A = "eng_Latn" , __A = None , __A = "fra_Latn" , **__A , ) -> BatchEncoding:
lowerCAmelCase_ :Optional[int] = src_lang
lowerCAmelCase_ :str = tgt_lang
return super().prepare_seqaseq_batch(__A , __A , **__A )
def __lowerCAmelCase ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCAmelCase ( self ) -> Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCAmelCase ( self , __A ) -> None:
lowerCAmelCase_ :Optional[int] = self.convert_tokens_to_ids(__A )
if self.legacy_behaviour:
lowerCAmelCase_ :Any = []
lowerCAmelCase_ :Any = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ :int = [self.cur_lang_code]
lowerCAmelCase_ :Any = [self.eos_token_id]
lowerCAmelCase_ :int = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ :Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ :Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self , __A ) -> None:
lowerCAmelCase_ :Optional[int] = self.convert_tokens_to_ids(__A )
if self.legacy_behaviour:
lowerCAmelCase_ :Optional[Any] = []
lowerCAmelCase_ :Tuple = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ :Union[str, Any] = [self.cur_lang_code]
lowerCAmelCase_ :Tuple = [self.eos_token_id]
lowerCAmelCase_ :Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ :List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ :Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCAmelCase_ :Tuple = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,)
| 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
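# Minimal usage sketch (the class above corresponds to the upstream DetrConfig; the
# values below are illustrative, not defaults):
#   config = DetrConfig(num_queries=50, d_model=128)
# For ONNX export, the config declares `pixel_values` and `pixel_mask` as dynamic-axis
# inputs and validates outputs with an absolute tolerance of 1e-5 (see above).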
| 1 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__a :List[str] = logging.get_logger(__name__)
__a :Union[str, Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 'deberta-v2'
def __init__( self : Tuple , UpperCAmelCase : Optional[Any]=128100 , UpperCAmelCase : List[Any]=1536 , UpperCAmelCase : Tuple=24 , UpperCAmelCase : List[Any]=24 , UpperCAmelCase : Tuple=6144 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : Any=0 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Any=1E-7 , UpperCAmelCase : Any=False , UpperCAmelCase : List[str]=-1 , UpperCAmelCase : int=0 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Optional[int]="gelu" , **UpperCAmelCase : Optional[Any] , ):
super().__init__(**UpperCAmelCase )
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = relative_attention
A_ = max_relative_positions
A_ = pad_token_id
A_ = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase ) == str:
A_ = [x.strip() for x in pos_att_type.lower().split("|" )]
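            # e.g. a value such as "c2p|p2c" (illustrative) is parsed into ["c2p", "p2c"]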
A_ = pos_att_type
A_ = vocab_size
A_ = layer_norm_eps
A_ = kwargs.get("pooler_hidden_size" , UpperCAmelCase )
A_ = pooler_dropout
A_ = pooler_hidden_act
class _a ( snake_case_ ):
"""simple docstring"""
@property
def __A ( self : Dict ):
if self.task == "multiple-choice":
A_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A_ = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def __A ( self : List[str] ):
return 12
def __A ( self : Union[str, Any] , UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional["TensorType"] = None , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 40 , UpperCAmelCase : int = 40 , UpperCAmelCase : "PreTrainedTokenizerBase" = None , ):
A_ = super().generate_dummy_inputs(preprocessor=UpperCAmelCase , framework=UpperCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs | 312 |
def __snake_case ( __UpperCamelCase : int = 1000 ):
"""simple docstring"""
return sum(e for e in range(3 ,__UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }") | 312 | 1 |
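# Quick sanity check (not part of the original script): solution(10) == 23,
# since the multiples of 3 or 5 below 10 are 3, 5, 6 and 9.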
"""simple docstring"""
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'\[([^\]]+)\]')
def get_indent(__A ) -> str:
'''simple docstring'''
    search = _re_indent.search(__A )
return "" if search is None else search.groups()[0]
def lowerCAmelCase_ ( __A, __A="", __A=None, __A=None ) -> str:
'''simple docstring'''
UpperCAmelCase__ = 0
UpperCAmelCase__ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__A ):
index += 1
UpperCAmelCase__ = ["\n".join(lines[:index] )]
else:
UpperCAmelCase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase__ = [lines[index]]
index += 1
while index < len(__A ) and (end_prompt is None or not lines[index].startswith(__A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__A ) )
if index < len(__A ) - 1:
UpperCAmelCase__ = [lines[index + 1]]
index += 1
else:
UpperCAmelCase__ = []
else:
blocks.append("\n".join(__A ) )
UpperCAmelCase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__A ) > 0:
blocks.append("\n".join(__A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__A ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore(key ) -> Tuple:
'''simple docstring'''
def _inner(__A ):
return key(__A ).lower().replace("_", "" )
return _inner
def sort_objects(objects, key=None ) -> list:
    '''simple docstring'''
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants, key=key1 ) + sorted(classes, key=key1 ) + sorted(functions, key=key1 )
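# For instance (an illustrative input, not from this repo):
#   sort_objects(["logging", "CONFIG_NAME", "BertModel"])
# returns ["CONFIG_NAME", "BertModel", "logging"]: constants first, classes second,
# functions last, each group sorted while ignoring case and underscores.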
def sort_objects_in_import(import_statement ) -> Optional[Any]:
    '''simple docstring'''
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f"""[{imports}]"""
        keys = [part.strip().replace("\"", "" ) for part in imports.split("," )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split("\n" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1] )
        else:
            keys = [part.strip().replace("\"", "" ) for part in lines[1].split("," )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement )
        return import_statement
def sort_imports(file, check_only=True ) -> Any:
    '''simple docstring'''
    with open(file, "r" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f"""Overwriting {file}.""" )
            with open(file, "w" ) as f:
                f.write("\n".join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ) -> list:
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py" ), check_only=check_only )
            if result:
                failures = [os.path.join(root, "__init__.py" )]
    if len(failures ) > 0:
        raise ValueError(f"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
UpperCamelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 350 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = 'facebook/bart-large-mnli'
__UpperCAmelCase : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
__UpperCAmelCase : Optional[int] = 'text_classifier'
__UpperCAmelCase : int = AutoTokenizer
__UpperCAmelCase : Dict = AutoModelForSequenceClassification
__UpperCAmelCase : int = ['text', ['text']]
__UpperCAmelCase : Optional[int] = ['text']
def lowercase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
        self._labels = labels
return self.pre_processor(
[text] * len(__UpperCAmelCase ) , [f"""This example is {label}""" for label in labels] , return_tensors="pt" , padding="max_length" , )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
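# Minimal usage sketch (assumes the Transformers tools runtime; the class name `A` is
# the obfuscated placeholder for the upstream text classification tool):
#   tool = A()
#   tool("This movie was lovely", ["positive", "negative"])  # -> likely "positive"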
| 143 | 0 |
"""simple docstring"""
from __future__ import annotations
def __A (value , weight , capacity ) ->tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
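# Illustrative check (values not from the original module): with value=[60, 100, 120],
# weight=[10, 20, 30] and capacity=50, the function returns (240.0, [1, 1, 0.666...]):
# items 0 and 1 are taken whole, then 20/30 of item 2.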
if __name__ == "__main__":
import doctest
doctest.testmod()
| 293 |
"""simple docstring"""
def __A (discount_rate , cash_flows ) ->float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
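# Illustrative check (values not from the original module): a rate of 0.1 and cash
# flows [-100, 60, 60] give -100 + 60/1.1 + 60/1.21, which rounds to 4.13.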
if __name__ == "__main__":
import doctest
doctest.testmod()
| 293 | 1 |
'''simple docstring'''
def print_max_activities( start , finish ):
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__a: str = [1, 3, 0, 5, 8, 5]
__a: Optional[Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
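# Note: the greedy selection above is only optimal when `finish` is sorted in
# ascending order, as it is in the sample data here.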
| 361 | '''simple docstring'''
class Graph :
    '''simple docstring'''
    def __init__( self ) -> None:
        self.vertex = {}
    def print_graph( self ) -> None:
        print(self.vertex )
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex , to_vertex ) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex , visited ) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
__a: Optional[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 214 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowercase_ ( lowerCAmelCase__ : Dict ):
"""simple docstring"""
return EnvironmentCommand()
class _A ( __SCREAMING_SNAKE_CASE ):
@staticmethod
def __A ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
        download_parser = parser.add_parser("""env""" )
download_parser.set_defaults(func=__UpperCAmelCase )
def __A ( self ) -> Dict:
'''simple docstring'''
        hub_version = huggingface_hub.__version__
        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = """not installed"""
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = """not installed"""
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = """not installed"""
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(info ) )
return info
@staticmethod
def __A ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 254 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
_UpperCamelCase = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class _A ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : str = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : Optional[Any] = TaTokenizer
_SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=100 , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase : List[Any] = [f'<extra_id_{i}>' for i in range(__UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool("""extra_id_""" in str(x ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
def __A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__UpperCAmelCase : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __UpperCAmelCase , )
return max_model_length
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCAmelCase : Any = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
logger.info(f'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : str = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__UpperCAmelCase : Optional[Any] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
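    # e.g. a single sequence becomes `token_ids </s>`; a pair becomes
    # `token_ids_a </s> token_ids_b </s>`.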
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self ) -> Any:
'''simple docstring'''
return list(
set(filter(lambda __UpperCAmelCase : bool(re.search(r"""<extra_id_\d+>""" , __UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return [self.convert_tokens_to_ids(__UpperCAmelCase ) for token in self.get_sentinel_tokens()]
| 254 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> int:
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["""model_config"""] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken("""<ent>""" , lstrip=False , rstrip=False )
    entity_token_two = AddedToken("""<ent2>""" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_one, entity_token_two]} )
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
    state_dict["""embeddings.word_embeddings.weight"""] = torch.cat([word_emb, ent_emb, enta_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + """w2e_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2w_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2e_""" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_emb[entity_vocab["""[MASK2]"""]] = entity_emb[entity_vocab["""[MASK]"""]]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
        raise ValueError(
            """Unexpected keys"""
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="""entity_classification""" )
    text = (
        """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
        """ new world number one avoid a humiliating second- round exit at Wimbledon ."""
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="""pt""" )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab( entity_vocab_path ) -> List[str]:
    entity_vocab = {}
    with open(entity_vocab_path , """r""" , encoding="""utf-8""" ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split("""\t""" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 347 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
a = BlipProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = self.prepare_image_inputs()
a = image_processor(__magic_name__ , return_tensors="""np""" )
a = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = processor(text=__magic_name__ )
a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__magic_name__ )
a = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 347 | 1 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( u , p ):
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
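# ucal(u, p) evaluates u * (u - 1) * ... * (u - p + 1), the coefficient of the p-th
# forward difference in Newton's forward interpolation formula:
#   P(u) = y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + ...   with u = (x - x0) / h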
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Dict = int(input('enter the numbers of values: ' ) )
snake_case_ : Any = []
for _ in range(__a ):
y.append([] )
for i in range(__a ):
for j in range(__a ):
y[i].append(__a )
snake_case_ : List[str] = 0
print('enter the values of parameters in a list: ' )
snake_case_ : Any = list(map(__a , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(__a ):
snake_case_ : Any = float(input() )
snake_case_ : List[Any] = int(input('enter the value to interpolate: ' ) )
snake_case_ : Optional[int] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __a ):
for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
snake_case_ : Union[str, Any] = y[0][0]
for i in range(1 , __a ):
summ += (ucal(__a , __a ) * y[0][i]) / math.factorial(__a )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 327 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
SCREAMING_SNAKE_CASE_ : str = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
SCREAMING_SNAKE_CASE_ : str = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
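# Example invocation (the script filename and checkpoint names are
# illustrative; any compatible vision encoder / causal-LM decoder pair
# should work):
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2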
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
'''7B''': 1_10_08,
'''13B''': 1_38_24,
'''30B''': 1_79_20,
'''65B''': 2_20_16,
'''70B''': 2_86_72,
}
NUM_SHARDS = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
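# Worked example: for LLaMA-7B, dim = 4096, so int(8 * 4096 / 3) = 10922,
# which rounds up to the next multiple of 256:
# compute_intermediate_size(4096) == 11008, the "7B" entry of
# INTERMEDIATE_SIZE_MAP above.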
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary: the original checkpoints interleave the
    # rotary dimensions, while the HF implementation expects them split into
    # two contiguous halves.
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
    main()
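# Example invocation (paths are illustrative):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B --output_dir ./llama-7b-hf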
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
"""Convert FocalNet checkpoints from the original repository. URL: https://github.com/microsoft/FocalNet/"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
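# e.g. get_focalnet_config("focalnet-tiny") yields depths [2, 2, 6, 2],
# embed_dim 96, focal_levels [2, 2, 2, 2] and focal_windows [3, 3, 3, 3].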
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
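# Worked example of the key mapping:
#   rename_key("layers.0.blocks.1.modulation.f.weight")
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"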
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
""" AltCLIP model configuration"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
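# Minimal usage sketch (default sub-configs, purely illustrative):
#
#   text_config = AltCLIPTextConfig()
#   vision_config = AltCLIPVisionConfig()
#   config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)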
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_image_processor_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
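# The hypothesis being fitted is h(x) = p0 + p1*x1 + p2*x2 + p3*x3 with
# parameter_vector = [p0, p1, p2, p3]; e.g. with the initial vector
# [2, 4, 1, 5], _hypothesis_value((5, 2, 3)) = 2 + 4*5 + 1*2 + 5*3 = 39.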
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
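# Example invocation (paths are illustrative):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./xlm-hf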
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
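# e.g. is_prime_low_num(1007) is False (1007 = 19 * 53, caught by the trial
# division over low_primes above), while is_prime_low_num(1009) falls through
# to rabin_miller() and returns True.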
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
"""simple docstring"""
UpperCAmelCase__ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
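# e.g. deps["torch"] == "torch>=1.9,!=1.12.0"; this table is autogenerated
# from setup.py and is looked up by package name in
# dependency_versions_check.py.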
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
lowercase__ :Union[str, Any] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
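# A quick negative check: a triangle contains an odd cycle, so it can never be
# 2-colored. This extra graph is illustrative only and not part of the original demo.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False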
| 101 |
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge**2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge**3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
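    # Illustrative values (rounded), assuming an arbitrary edge length of 5 units:
    # dodecahedron_surface_area(5) -> ~516.14
    # dodecahedron_volume(5)       -> ~957.89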
| 101 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
@dataclass
class a_ :
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = field(default_factory=A__ )
UpperCAmelCase_ = field(default_factory=A__ )
def __snake_case ( self : str , lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = len(list(m.modules())) == 1 or isinstance(__A , nn.Convad) or isinstance(__A , nn.BatchNormad)
if has_not_submodules:
self.traced.append(__A)
def __call__( self : int , lowercase__ : List[Any]):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(__A)
[x.remove() for x in self.handles]
return self
@property
def __snake_case ( self : Tuple):
'''simple docstring'''
return list(filter(lambda lowercase__: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class a_ :
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 1
UpperCAmelCase_ = field(default_factory=A__ )
UpperCAmelCase_ = field(default_factory=A__ )
UpperCAmelCase_ = True
def __call__( self : Dict , lowercase__ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = Tracker(self.dest)(__A).parametrized
lowerCAmelCase__ = Tracker(self.src)(__A).parametrized
lowerCAmelCase__ = list(filter(lambda lowercase__: type(__A) not in self.src_skip , __A))
lowerCAmelCase__ = list(filter(lambda lowercase__: type(__A) not in self.dest_skip , __A))
if len(__A) != len(__A) and self.raise_if_mismatch:
raise Exception(
F"""Numbers of operations are different. Source module has {len(__A)} operations while"""
F""" destination module has {len(__A)}.""")
for dest_m, src_m in zip(__A , __A):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""")
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowercase__ : str):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ = []
# - get the stem
feature_blocks.append(('conv1', model.stem))
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block'), F"""Unexpected layer name {k}"""
lowerCAmelCase__ = len(__A) + 1
feature_blocks.append((F"""res{block_index}""", v))
lowerCAmelCase__ = nn.ModuleDict(__A)
def __snake_case ( self : str , lowercase__ : str):
'''simple docstring'''
return get_trunk_forward_outputs(
__A , out_feat_keys=__A , feature_blocks=self._feature_blocks , )
class a_ ( A__ ):
'''simple docstring'''
def __snake_case ( self : List[str] , lowercase__ : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = x.split('-')
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
def __getitem__( self : Any , lowercase__ : Optional[int]):
'''simple docstring'''
if x not in self:
lowerCAmelCase__ = self.convert_name_to_timm(__A)
lowerCAmelCase__ = partial(lambda: (timm.create_model(__A , pretrained=__A).eval(), None))
else:
lowerCAmelCase__ = super().__getitem__(__A)
return val
class a_ ( A__ ):
'''simple docstring'''
def __getitem__( self : Optional[int] , lowercase__ : Union[str, Any]):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
lowerCAmelCase__ = RegNetModel
else:
lowerCAmelCase__ = RegNetForImageClassification
return val
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for from_key, to_key in keys:
lowerCAmelCase__ = from_state_dict[from_key].clone()
print(F"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ):
print(F"""Converting {name}...""" )
with torch.no_grad():
lowerCAmelCase__ = from_model_func()
lowerCAmelCase__ = our_model_func(lowercase__ ).eval()
lowerCAmelCase__ = ModuleTransfer(src=lowercase__ , dest=lowercase__ , raise_if_mismatch=lowercase__ )
lowerCAmelCase__ = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(lowercase__ )
if from_state_dict is not None:
lowerCAmelCase__ = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
lowerCAmelCase__ = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
lowerCAmelCase__ = manually_copy_vissl_head(lowercase__ , our_model.state_dict() , lowercase__ )
our_model.load_state_dict(lowercase__ )
lowerCAmelCase__ = our_model(lowercase__ , output_hidden_states=lowercase__ )
lowerCAmelCase__ = (
our_outputs.logits if isinstance(lowercase__ , lowercase__ ) else our_outputs.last_hidden_state
)
lowerCAmelCase__ = from_model(lowercase__ )
lowerCAmelCase__ = from_output[-1] if type(lowercase__ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
lowerCAmelCase__ = our_outputs.hidden_states[-1]
assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase__ , )
lowerCAmelCase__ = 2_2_4 if """seer""" not in name else 3_8_4
# we can use the convnext one
lowerCAmelCase__ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase__ , )
print(F"""Pushed {name}""" )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = True ):
lowerCAmelCase__ = """imagenet-1k-id2label.json"""
lowerCAmelCase__ = 1_0_0_0
lowerCAmelCase__ = (1, num_labels)
lowerCAmelCase__ = """huggingface/label-files"""
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase__ = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ )
lowerCAmelCase__ = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='x' ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='x' ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='x' ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='x' ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='x' ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='x' ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='x' ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='x' ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='x' ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='x' ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='x' ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='x' ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
lowerCAmelCase__ = NameToOurModelFuncMap()
lowerCAmelCase__ = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple[nn.Module, Dict]:
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location='cpu' )
lowerCAmelCase__ = model_func()
# check if we have a head, if yes add it
lowerCAmelCase__ = files["""classy_state_dict"""]["""base_model"""]["""model"""]
lowerCAmelCase__ = model_state_dict["""trunk"""]
model.load_state_dict(lowercase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowerCAmelCase__ = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
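# Example invocation (the script name and paths are illustrative, not taken from this file):
# python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./regnet-dump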
| 351 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
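# With the `_LazyModule` registered in `sys.modules`, the heavy submodules above are
# only imported on first attribute access, e.g. (illustrative):
# from transformers.models.x_clip import XCLIPConfig  # triggers the real import lazily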
| 119 | 0 |
"""simple docstring"""
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a breadth first search from the source vertex, recording parents."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to target_vertex in the form 'A->B->C'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
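    # Expected output for the demo graph above (the last call raises, since "Foo"
    # is not reachable from "G"):
    # G->C->A->B->D
    # G
    # ValueError: No path from vertex: G to vertex: Foo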
| 292 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of '0'/'1' characters."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the LZW-compressed bit string back into the original bit string."""
    lexicon = {"0": "0", "1": "1"}
    curr_string = ""
    result = ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # when the index crosses a power of two, rebuild the lexicon with longer keys
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the given file, padding the final byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the leading zero run and separator bit added during compression."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress its contents and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
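# Example command-line usage (script name and file names are illustrative):
# python lempel_ziv_decompress.py compressed.lz restored.bin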
| 292 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor for EnCodec-style audio models."""

    model_input_names = ["input_values", "padding_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
def A ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A ( self : str ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Dict , _a : Union[str, Any] , _a : Dict = None , _a : int = False , _a : List[str] = None , _a : int = None , _a : Optional[int] = None , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =bool(
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_SCREAMING_SNAKE_CASE =[np.asarray(__lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
_SCREAMING_SNAKE_CASE =np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_SCREAMING_SNAKE_CASE =raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_SCREAMING_SNAKE_CASE =[np.asarray(__lowerCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__lowerCamelCase ):
if example.ndim > 2:
raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels" )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_SCREAMING_SNAKE_CASE =min(array.shape[0] for array in raw_audio )
_SCREAMING_SNAKE_CASE =int(np.floor(max_length / self.chunk_stride ) )
_SCREAMING_SNAKE_CASE =(nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_SCREAMING_SNAKE_CASE =max(array.shape[0] for array in raw_audio )
_SCREAMING_SNAKE_CASE =int(np.ceil(max_length / self.chunk_stride ) )
_SCREAMING_SNAKE_CASE =(nb_step - 1) * self.chunk_stride + self.chunk_length
_SCREAMING_SNAKE_CASE ='''max_length'''
else:
_SCREAMING_SNAKE_CASE =input_values
# normal padding on batch
if padded_inputs is None:
_SCREAMING_SNAKE_CASE =self.pad(
__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , padding=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
if padding:
_SCREAMING_SNAKE_CASE =padded_inputs.pop('attention_mask' )
_SCREAMING_SNAKE_CASE =[]
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
_SCREAMING_SNAKE_CASE =example[..., None]
input_values.append(example.T )
_SCREAMING_SNAKE_CASE =input_values
if return_tensors is not None:
_SCREAMING_SNAKE_CASE =padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
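# Minimal usage sketch (the one-second mono input and tensor type are illustrative):
# import numpy as np
# extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
# inputs = extractor(np.zeros(24000, dtype=np.float32), sampling_rate=24000, return_tensors="np")
# print(inputs["input_values"])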
| 356 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule() -> None:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin() -> None:
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing() -> None:
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin() -> None:
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop() -> None:
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive() -> None:
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist() -> None:
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
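# The mechanism exercised above, in one illustrative snippet (mock value arbitrary):
# mock = "__demo_mock__"
# with patch_submodule(_test_patching, "os.path.join", mock):
#     assert _test_patching.os.path.join is mock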
| 114 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
SCREAMING_SNAKE_CASE_: Dict =['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def _lowercase (self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase (self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase (self : Optional[int] , __a : List[Any] , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : Any ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
UpperCAmelCase_ = self.convert_tokens_to_ids(__a )
UpperCAmelCase_ = tgt_lang_id
return inputs
def _lowercase (self : Optional[int] , __a : List[str] , __a : str = "eng_Latn" , __a : Optional[List[str]] = None , __a : str = "fra_Latn" , **__a : Tuple , ):
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def _lowercase (self : str ):
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase (self : Any ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase (self : int , __a : Optional[int] ):
UpperCAmelCase_ = self.convert_tokens_to_ids(__a )
if self.legacy_behaviour:
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_ = [self.cur_lang_code]
UpperCAmelCase_ = [self.eos_token_id]
UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ = self.convert_tokens_to_ids(__a )
if self.legacy_behaviour:
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_ = [self.cur_lang_code]
UpperCAmelCase_ = [self.eos_token_id]
UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase (self : Optional[int] , __a : str , __a : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
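# Illustrative translation setup (checkpoint name taken from the map above):
# tokenizer = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# batch = tokenizer("Hello world", return_tensors="pt")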
| 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(a_ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
# Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
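# For n = 4 the search prints the two classic solutions, corresponding to the
# column placements [1, 3, 0, 2] and [2, 0, 3, 1], followed by
# "2 solutions were found."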
| 164 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
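    # Quick illustrative session:
    # cll = CircularLinkedList()
    # cll.insert_tail(1); cll.insert_tail(2); cll.insert_head(0)
    # print(cll)  # 0->1->2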
| 164 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase :List[Any] = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (adapted from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a text file in the fairseq format."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary, or bump its count if it already exists."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # strip BPE continuation markers and append the word-ending symbol where a word is not broken up
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint into the Hugging Face format."""
if not os.path.exists(UpperCamelCase__ ):
raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__magic_name__ : List[Any] = os.path.join(UpperCamelCase__ , 'checkpoint.pt' )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
__magic_name__ : Optional[int] = torch.load(UpperCamelCase__ , map_location='cpu' )
__magic_name__ : Optional[int] = chkpt['''cfg''']['''model''']
# dicts
__magic_name__ : str = os.path.join(UpperCamelCase__ , 'dict.txt' )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(f'path to the file {dict_file} does not exist!' )
__magic_name__ : Tuple = Dictionary.load(UpperCamelCase__ )
__magic_name__ : Dict = rewrite_dict_keys(src_dict.indices )
__magic_name__ : Optional[Any] = len(UpperCamelCase__ )
__magic_name__ : Tuple = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['vocab_file'] )
print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# merges_file (bpecodes)
__magic_name__ : int = os.path.join(UpperCamelCase__ , 'bpecodes' )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
__magic_name__ : int = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(UpperCamelCase__ , UpperCamelCase__ )
# model config
__magic_name__ : Dict = os.path.join(UpperCamelCase__ , 'config.json' )
__magic_name__ : Tuple = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f'Generating {biogpt_model_config_file}' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# tokenizer config
__magic_name__ : List[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ : List[Any] = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1024,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f'Generating {biogpt_tokenizer_config_file}' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# model
__magic_name__ : Union[str, Any] = chkpt['''model''']
# remove unneeded keys
__magic_name__ : Optional[Any] = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ : Dict = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
__magic_name__ : Dict = model_state_dict.pop(UpperCamelCase__ )
else:
__magic_name__ : Optional[Any] = model_state_dict.pop(UpperCamelCase__ )
__magic_name__ : int = BioGptConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ : Union[str, Any] = BioGptForCausalLM(UpperCamelCase__ )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase__ )
# save
__magic_name__ : Tuple = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
print('Conversion is done!' )
if __name__ == "__main__":
lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase :int = parser.parse_args()
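    # Example invocation (script name and paths are illustrative):
    # python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #     --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
    #     --pytorch_dump_folder_path ./biogpt-hf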
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 331 |
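# Hedged CLI sketch for the converter above (the script name and both paths are
# illustrative placeholders; the two flags are the ones registered by argparse above):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir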
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
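# Hedged minimal invocation of the dual-guided path exercised by the tests above.
# A CUDA device is assumed; the checkpoint name, image URL, and keyword arguments
# are the ones the tests use.
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image

pipe = VersatileDiffusionPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
)
out = pipe.dual_guided(
    prompt="cyberpunk 2077",
    image=image,
    text_to_image_strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=50,
    output_type="numpy",
).images
print(out.shape)  # (1, 512, 512, 3), per the assertions above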
| 263 | 0 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowerCamelCase : List[Any] = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_lowerCamelCase : Tuple = concatenate_datasets
_lowerCamelCase : List[Any] = DownloadConfig
_lowerCamelCase : Optional[int] = DownloadManager
_lowerCamelCase : Tuple = DownloadMode
_lowerCamelCase : List[str] = DownloadConfig
_lowerCamelCase : Optional[int] = DownloadMode
_lowerCamelCase : List[str] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
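# Hedged usage sketch of the public API re-exported by this __init__, run from
# user code rather than from inside the module itself (the dataset name is
# illustrative and downloading it requires network access):
from datasets import load_dataset

ds = load_dataset("wikitext", "wikitext-103-raw-v1", split="train")
print(ds)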
| 361 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase :
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : int
lowercase__ : int
lowercase__ : float
lowercase__ : float
lowercase__ : Tuple[int]
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __snake_case( self : int ) -> str:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __snake_case( self : Tuple ) -> List[str]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __snake_case( self : Any ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(_UpperCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def __snake_case( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.shape
SCREAMING_SNAKE_CASE = int(np.prod(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = self.get_image_coords()
SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE = self.get_camera_rays(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rays.view(_UpperCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __snake_case( self : Optional[int] , _UpperCamelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE = coords.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = self.resolution()
SCREAMING_SNAKE_CASE = self.fov()
SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE = fracs.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = (
self.z.view(_UpperCamelCase , 1 , 3 )
+ self.x.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(_UpperCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_UpperCamelCase , *_UpperCamelCase , 2 , 3 )
def __snake_case( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_UpperCamelCase , height=_UpperCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase (UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
SCREAMING_SNAKE_CASE = np.array([np.sin(UpperCAmelCase__ ), np.cos(UpperCAmelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE = -z * 4
SCREAMING_SNAKE_CASE = np.array([np.cos(UpperCAmelCase__ ), -np.sin(UpperCAmelCase__ ), 0.0] )
SCREAMING_SNAKE_CASE = np.cross(UpperCAmelCase__ , UpperCAmelCase__ )
origins.append(UpperCAmelCase__ )
xs.append(UpperCAmelCase__ )
ys.append(UpperCAmelCase__ )
zs.append(UpperCAmelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , width=UpperCAmelCase__ , height=UpperCAmelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCAmelCase__ )) , )
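# Minimal standalone sketch of the ray construction implemented above: pixel
# coordinates are mapped to [-1, 1] fractions, scaled by tan(fov / 2), combined
# with the camera's x/y/z axes, and normalized. The readable names here are
# hypothetical; the file above uses obfuscated identifiers.
import math

import torch


def pixel_rays(x_axis, y_axis, z_axis, width, height, fov):
    ys, xs = torch.meshgrid(torch.arange(height), torch.arange(width), indexing="ij")
    coords = torch.stack([xs, ys], dim=-1).reshape(-1, 2).float()
    fracs = (coords / (torch.tensor([width, height]).float() - 1)) * 2 - 1
    fracs = fracs * math.tan(fov / 2)
    directions = z_axis + x_axis * fracs[:, :1] + y_axis * fracs[:, 1:]
    return directions / directions.norm(dim=-1, keepdim=True)


rays = pixel_rays(
    torch.tensor([1.0, 0.0, 0.0]),
    torch.tensor([0.0, 1.0, 0.0]),
    torch.tensor([0.0, 0.0, 1.0]),
    width=4,
    height=4,
    fov=0.7,
)
print(rays.shape)  # torch.Size([16, 3])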
| 206 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCamelCase : int = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE () -> List[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=A , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=A , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=A , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=A , default=1_000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=A , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=A , type=A , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=A , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=A , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
lowercase__ = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (A ) -> str:
"""simple docstring"""
def fn(A ):
return tokenizer(examples['''text'''] )
return fn
def _SCREAMING_SNAKE_CASE (A ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
lowercase__ = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
lowercase__ = tf.train.Features(feature=A )
lowercase__ = tf.train.Example(features=A )
lowercase__ = example.SerializeToString()
records.append(A )
return records
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
lowercase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ = min(len(A ) , args.limit )
lowercase__ = dataset.select(range(A ) )
print(f"Limiting the dataset to {args.limit} entries." )
lowercase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
lowercase__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ = tokenize_function(A )
lowercase__ = dataset.map(A , batched=A , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A ):
# Concatenate all texts.
lowercase__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ = {
k: [t[i : i + args.max_length] for i in range(0 , A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowercase__ = dataset_tokenized.map(A , batched=A , batch_size=1_000 , num_proc=4 )
lowercase__ = 0
lowercase__ = 0
for shard in range(0 , len(A ) , args.shard_size ):
lowercase__ = grouped_dataset[shard : shard + args.shard_size]
lowercase__ = len(dataset_snapshot['''input_ids'''] )
lowercase__ = os.path.join(A , f"dataset-{shard_count}-{records_containing}.tfrecord" )
lowercase__ = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
lowercase__ = serialized_examples[i]
out_file.write(A )
print('''Wrote file {} containing {} records'''.format(A , A ) )
shard_count += 1
total_records += records_containing
with open(f"split-{args.split}-records-count.txt" , '''w''' ) as f:
print(f"Total {args.split} records: {total_records}" , file=A )
if __name__ == "__main__":
lowerCamelCase : List[Any] = parse_args()
main(args)
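# Toy demonstration of the `group_texts` chunking above, with max_length shrunk
# to 8 for readability (the input values are illustrative):
toy = {"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11, 12]]}
concatenated = {k: sum(toy[k], []) for k in toy}
max_length = 8
total_length = (len(concatenated["input_ids"]) // max_length) * max_length
chunks = {
    k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
    for k, t in concatenated.items()
}
print(chunks)  # {'input_ids': [[1, 2, 3, 4, 5, 6, 7, 8]]} -- the 4-token remainder is dropped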
| 2 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _lowercase :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=64 , SCREAMING_SNAKE_CASE__ : int=None ) -> Optional[int]:
__lowerCAmelCase = np.random.default_rng(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = length
__lowerCAmelCase = rng.normal(size=(length,) ).astype(np.floataa )
__lowerCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Union[str, Any] ) -> Optional[Any]:
return self.length
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
return {"x": self.x[i], "y": self.y[i]}
class _lowercase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Any:
super().__init__()
__lowerCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__lowerCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__lowerCAmelCase = True
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> str:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
__lowerCAmelCase = False
return x * self.a[0] + self.b[0]
class _lowercase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Optional[Any]:
super().__init__()
__lowerCAmelCase = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE__ ).float() )
__lowerCAmelCase = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE__ ).float() )
__lowerCAmelCase = True
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=None ) -> int:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
__lowerCAmelCase = False
return x * self.a + self.b
def UpperCamelCase_ ( snake_case_ : List[str] , snake_case_ : int = 16 ) -> int:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
__lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__lowerCAmelCase = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
__lowerCAmelCase = load_dataset("""csv""" , data_files=snake_case_ )
__lowerCAmelCase = datasets["""train"""].unique("""label""" )
__lowerCAmelCase = {v: i for i, v in enumerate(snake_case_ )}
def tokenize_function(snake_case_ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case_ , max_length=snake_case_ , padding="""max_length""" )
if "label" in examples:
__lowerCAmelCase = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCAmelCase = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(snake_case_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(snake_case_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCAmelCase = DataLoader(tokenized_datasets["""train"""] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=2 )
__lowerCAmelCase = DataLoader(tokenized_datasets["""validation"""] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=1 )
return train_dataloader, eval_dataloader
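# Readable re-statement of the linear-regression dataset defined above (the
# original uses obfuscated identifiers); the names here are hypothetical, but
# the logic mirrors the class body: y = a * x + b plus Gaussian noise.
import numpy as np
from torch.utils.data import DataLoader, Dataset


class ToyRegressionDataset(Dataset):
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = (a * self.x + b + rng.normal(scale=0.1, size=(length,))).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


loader = DataLoader(ToyRegressionDataset(seed=0), batch_size=16, shuffle=True)
batch = next(iter(loader))
print(batch["x"].shape, batch["y"].shape)  # torch.Size([16]) torch.Size([16])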
| 229 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def _UpperCamelCase ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Dict=None , ) -> Tuple:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=UpperCamelCase_ )
if decoder_head_mask is None:
lowerCAmelCase__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCamelCase_ )
if cross_attn_head_mask is None:
lowerCAmelCase__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCamelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=99 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=4 , _UpperCamelCase="relu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=20 , _UpperCamelCase=2 , _UpperCamelCase=1 , _UpperCamelCase=0 , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = encoder_layerdrop
lowerCAmelCase__ = decoder_layerdrop
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = bos_token_id
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = self.eos_token_id # Eos Token
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase__ = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ = self.get_config()
lowerCAmelCase__ = prepare_mam_aaa_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = MaMaaaModel(config=_UpperCamelCase ).get_decoder().to(_UpperCamelCase ).eval()
lowerCAmelCase__ = inputs_dict['input_ids']
lowerCAmelCase__ = inputs_dict['attention_mask']
lowerCAmelCase__ = inputs_dict['head_mask']
# first forward pass
lowerCAmelCase__ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , head_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase__ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['last_hidden_state']
lowerCAmelCase__ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'last_hidden_state'
]
# select random slice
lowerCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-2 ) )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = MaMaaaModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
lowerCAmelCase__ = model(**_UpperCamelCase )
lowerCAmelCase__ = outputs.encoder_last_hidden_state
lowerCAmelCase__ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = model.get_encoder()
encoder.save_pretrained(_UpperCamelCase )
lowerCAmelCase__ = MaMaaaEncoder.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
lowerCAmelCase__ = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = model.get_decoder()
decoder.save_pretrained(_UpperCamelCase )
lowerCAmelCase__ = MaMaaaDecoder.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
lowerCAmelCase__ = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
_SCREAMING_SNAKE_CASE : Optional[Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Optional[int] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : Optional[int] = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Optional[int] = True
_SCREAMING_SNAKE_CASE : Optional[Any] = True
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Tuple = False
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
            # The translation pipeline raises `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests, so a simple tokenizer cannot be created for it.
return True
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = MaMaaaModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ = model_class.from_pretrained(_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertEqual(info['missing_keys'] , [] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase__ = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase__ = copy.deepcopy(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
if not self.is_encoder_decoder:
lowerCAmelCase__ = inputs['input_ids']
del inputs["input_ids"]
else:
lowerCAmelCase__ = inputs['input_ids']
lowerCAmelCase__ = inputs.get('decoder_input_ids' , _UpperCamelCase )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , _UpperCamelCase )
lowerCAmelCase__ = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase__ = wte(_UpperCamelCase )
else:
lowerCAmelCase__ = wte(_UpperCamelCase )
lowerCAmelCase__ = wte(_UpperCamelCase )
with torch.no_grad():
model(**_UpperCamelCase )[0]
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = input_dict['input_ids']
lowerCAmelCase__ = input_ids.ne(1 ).to(_UpperCamelCase )
lowerCAmelCase__ = MaMaaaForConditionalGeneration(_UpperCamelCase ).eval().to(_UpperCamelCase )
if torch_device == "cuda":
model.half()
model.generate(_UpperCamelCase , attention_mask=_UpperCamelCase )
model.generate(num_beams=4 , do_sample=_UpperCamelCase , early_stopping=_UpperCamelCase , num_return_sequences=3 )
def _UpperCamelCase ( UpperCamelCase_ : int ) -> Dict:
"""simple docstring"""
return torch.tensor(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
__snake_case : List[str] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(_UpperCamelCase )
lowerCAmelCase__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
lowerCAmelCase__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
lowerCAmelCase__ = prepare_mam_aaa_inputs_dict(model.config , _UpperCamelCase , _UpperCamelCase )
with torch.no_grad():
lowerCAmelCase__ = model(**_UpperCamelCase )[0]
lowerCAmelCase__ = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , _UpperCamelCase )
# change to expected output here
lowerCAmelCase__ = torch.tensor(
[[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCamelCase )
# change to intended input
lowerCAmelCase__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
lowerCAmelCase__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
lowerCAmelCase__ = prepare_mam_aaa_inputs_dict(model.config , _UpperCamelCase , _UpperCamelCase )
with torch.no_grad():
lowerCAmelCase__ = model(**_UpperCamelCase )[0]
lowerCAmelCase__ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
# change to expected output here
lowerCAmelCase__ = torch.tensor(
[[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCamelCase )
lowerCAmelCase__ = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
lowerCAmelCase__ = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCAmelCase__ = tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors='pt' )
lowerCAmelCase__ = model.generate(
input_ids=dct['input_ids'].to(_UpperCamelCase ) , attention_mask=dct['attention_mask'].to(_UpperCamelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
lowerCAmelCase__ = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
lowerCAmelCase__ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
assert generated == expected_en
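# Hedged end-to-end sketch of the fr->en generation pattern exercised by the
# integration test above (the checkpoint name and `forced_bos_token_id` usage
# are taken from the test; the input sentence is illustrative):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer(["La vie est belle."], return_tensors="pt")
generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))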
| 122 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
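# Hedged illustration of the `_LazyModule` pattern wired up above, run from user
# code rather than from inside this module: symbols listed in `_import_structure`
# are only imported on first attribute access, so `import transformers` stays
# cheap. The model below is randomly initialized; no checkpoint is downloaded.
from transformers import ConvBertConfig, ConvBertModel

model = ConvBertModel(ConvBertConfig())
print(model.config.hidden_size)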
| 122 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
# Load configuration defined in the metadata file
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
snake_case_ = json.load(_SCREAMING_SNAKE_CASE )
snake_case_ = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
snake_case_ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# Load the entity vocab file
snake_case_ = load_entity_vocab(_SCREAMING_SNAKE_CASE )
snake_case_ = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case_ = AddedToken("""<ent>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
snake_case_ = AddedToken("""<ent2>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
snake_case_ = state_dict["""embeddings.word_embeddings.weight"""]
snake_case_ = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
snake_case_ = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
snake_case_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case_ = f"""encoder.layer.{layer_index}.attention.self."""
snake_case_ = state_dict[prefix + matrix_name]
snake_case_ = state_dict[prefix + matrix_name]
snake_case_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
snake_case_ = entity_emb[entity_vocab["""[MASK]"""]]
snake_case_ = LukeModel(config=_SCREAMING_SNAKE_CASE ).eval()
snake_case_ , snake_case_ = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if not (len(_SCREAMING_SNAKE_CASE ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {", ".join(_SCREAMING_SNAKE_CASE )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
snake_case_ = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task="""entity_classification""" )
snake_case_ = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
snake_case_ = (39, 42)
snake_case_ = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , add_prefix_space=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
snake_case_ = torch.Size((1, 42, 1_024) )
snake_case_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
snake_case_ = torch.Size((1, 42, 768) )
snake_case_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
snake_case_ = torch.Size((1, 1, 1_024) )
snake_case_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
snake_case_ = torch.Size((1, 1, 768) )
snake_case_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ = line.rstrip().split("""\t""" )
snake_case_ = index
return entity_vocab
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
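# Hedged CLI sketch for the converter above (the script name and all paths are
# illustrative placeholders; the flags are the ones registered by argparse above):
#   python convert_luke_checkpoint.py \
#       --checkpoint_path pytorch_model.bin \
#       --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted_luke \
#       --model_size base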
| 347 |
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
| 347 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
def __init__( self :Any ,__lowercase :Tuple ,__lowercase :str=1_3 ,__lowercase :Optional[int]=3_2 ,__lowercase :Any=2 ,__lowercase :Dict=3 ,__lowercase :Optional[Any]=1_6 ,__lowercase :List[str]=[3_2, 6_4, 1_2_8] ,__lowercase :str=[1, 2, 1] ,__lowercase :Optional[Any]=[2, 2, 4] ,__lowercase :Tuple=2 ,__lowercase :List[Any]=2.0 ,__lowercase :Tuple=True ,__lowercase :Optional[int]=0.0 ,__lowercase :str=0.0 ,__lowercase :str=0.1 ,__lowercase :str="gelu" ,__lowercase :Union[str, Any]=False ,__lowercase :Tuple=True ,__lowercase :Union[str, Any]=0.02 ,__lowercase :Optional[Any]=1e-5 ,__lowercase :str=True ,__lowercase :Union[str, Any]=None ,__lowercase :int=True ,__lowercase :List[str]=1_0 ,__lowercase :Dict=8 ,__lowercase :Optional[int]=["stage1", "stage2"] ,__lowercase :Union[str, Any]=[1, 2] ,):
snake_case__ : Any = parent
snake_case__ : Dict = batch_size
snake_case__ : List[Any] = image_size
snake_case__ : List[str] = patch_size
snake_case__ : Any = num_channels
snake_case__ : Optional[int] = embed_dim
snake_case__ : str = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : Any = num_heads
snake_case__ : Optional[Any] = window_size
snake_case__ : Union[str, Any] = mlp_ratio
snake_case__ : Dict = qkv_bias
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : Dict = drop_path_rate
snake_case__ : int = hidden_act
snake_case__ : Optional[Any] = use_absolute_embeddings
snake_case__ : Optional[Any] = patch_norm
snake_case__ : List[Any] = layer_norm_eps
snake_case__ : List[str] = initializer_range
snake_case__ : int = is_training
snake_case__ : Tuple = scope
snake_case__ : Any = use_labels
snake_case__ : Any = type_sequence_label_size
snake_case__ : Optional[Any] = encoder_stride
snake_case__ : Tuple = out_features
snake_case__ : str = out_indices
def __lowerCamelCase ( self :Tuple ):
snake_case__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self :int ):
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :str ,__lowercase :str ,__lowercase :Optional[Any] ):
snake_case__ : Optional[Any] = FocalNetModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case__ : List[str] = model(UpperCamelCase__ )
snake_case__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case__ : Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def __lowerCamelCase ( self :List[Any] ,__lowercase :Optional[Any] ,__lowercase :int ,__lowercase :Dict ):
snake_case__ : Dict = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case__ : int = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
snake_case__ : Dict = None
snake_case__ : str = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case__ : List[str] = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowerCamelCase ( self :Tuple ,__lowercase :Any ,__lowercase :List[str] ,__lowercase :List[Any] ):
snake_case__ : Optional[int] = FocalNetForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case__ : Tuple = 1
snake_case__ : Union[str, Any] = FocalNetForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCamelCase ( self :List[str] ,__lowercase :Dict ,__lowercase :Union[str, Any] ,__lowercase :Union[str, Any] ):
snake_case__ : List[Any] = self.type_sequence_label_size
snake_case__ : Dict = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case__ : Any = model(UpperCamelCase__ ,labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : Tuple = 1
snake_case__ : Tuple = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self :Dict ):
snake_case__ : int = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : int = config_and_inputs
snake_case__ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( __a , __a , unittest.TestCase ):
__lowerCAmelCase : str = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : Tuple = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[Any] = FocalNetModelTester(self )
snake_case__ : List[Any] = ConfigTester(self ,config_class=UpperCamelCase__ ,embed_dim=3_7 ,has_text_modality=UpperCamelCase__ )
def __lowerCamelCase ( self :Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self :str ):
return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def check_hidden_states_output( self ,inputs_dict ,config ,model_class ,image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) ,expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) ,expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size ,num_channels ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
    def test_hidden_states_output( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,image_size )
    def test_hidden_states_output_with_padding( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,(padded_height, padded_width) )
@slow
    def test_model_from_pretrained( self ):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = image_processor(images=image ,return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,2_8_1 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
| 368 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig( PretrainedConfig ):
    model_type = """bart"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self ,vocab_size=5_0_2_6_5 ,max_position_embeddings=1_0_2_4 ,encoder_layers=1_2 ,encoder_ffn_dim=4_0_9_6 ,encoder_attention_heads=1_6 ,decoder_layers=1_2 ,decoder_ffn_dim=4_0_9_6 ,decoder_attention_heads=1_6 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,activation_function="gelu" ,d_model=1_0_2_4 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,classifier_dropout=0.0 ,scale_embedding=False ,use_cache=True ,num_labels=3 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,is_encoder_decoder=True ,decoder_start_token_id=2 ,forced_eos_token_id=2 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels ,pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' ,False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''' )
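# Example usage (a minimal sketch; the values below override the defaults above):
#   config = BartConfig(encoder_layers=2, decoder_layers=2, d_model=128)
#   assert config.hidden_size == 128  # "hidden_size" resolves to "d_model" through attribute_map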
class BartOnnxConfig( OnnxSeq2SeqConfigWithPast ):
@property
    def inputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs ,direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
@property
    def outputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast ,self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"""present.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"""present.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,decoder_seq_length ,is_pair ,framework )
        decoder_inputs = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs ,**decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , encoder_seq_length = common_inputs['''input_ids'''].shape
                decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
                num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
                # Past key/value tensors are shaped (batch, num_heads, seq_len, head_dim).
                encoder_shape = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                decoder_past_length = decoder_seq_length + 3
                decoder_shape = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                common_inputs['''decoder_attention_mask'''] = torch.cat(
                    [common_inputs['''decoder_attention_mask'''], torch.ones(batch ,decoder_past_length )] ,dim=1 )
                common_inputs['''past_key_values'''] = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                num_encoder_layers , num_decoder_layers = self.num_layers
                min_num_layers = min(num_encoder_layers ,num_decoder_layers )
                max_num_layers = max(num_encoder_layers ,num_decoder_layers ) - min_num_layers
                remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
                for _ in range(min_num_layers ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(decoder_shape ),
                            torch.zeros(decoder_shape ),
                            torch.zeros(encoder_shape ),
                            torch.zeros(encoder_shape ),
                        ) )
                # TODO: test this.
                shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
                for _ in range(min_num_layers ,max_num_layers ):
                    common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                num_encoder_layers , _ = self.num_layers
                num_encoder_attention_heads , _ = self.num_attention_heads
                past_shape = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                mask_dtype = common_inputs['''attention_mask'''].dtype
                common_inputs['''attention_mask'''] = torch.cat(
                    [common_inputs['''attention_mask'''], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
                common_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
                ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input ,return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self ,flattened_output ,name ,idx ,t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output ,name ,idx ,t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast ,self )._flatten_past_key_values_(
                flattened_output ,name ,idx ,t )
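# Example (a minimal sketch of how this export config is typically driven;
# `tokenizer` stands for any BART tokenizer and is an assumption of the sketch):
#   onnx_config = BartOnnxConfig(BartConfig(), task="default")
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask']
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8,
#                                             framework=TensorType.PYTORCH)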
| 44 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental( fn: Callable ):
    """Decorator that warns the caller that the wrapped callable is experimental."""
    @wraps(fn )
    def _inner_fn( *args ,**kwargs ):
        warnings.warn(
            (F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') ,UserWarning ,)
        return fn(*args ,**kwargs )
    return _inner_fn
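# Example usage (a minimal sketch):
#   @experimental
#   def new_api():
#       ...
#   new_api()  # warns: "'new_api' is experimental and might be subject to breaking changes in the future."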
| 85 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig( PretrainedConfig ):
    model_type = """codegen"""
    attribute_map = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self, vocab_size=5_0_4_0_0, n_positions=2_0_4_8, n_ctx=2_0_4_8, n_embd=4_0_9_6, n_layer=2_8, n_head=1_6, rotary_dim=6_4, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1E-5, initializer_range=0.0_2, use_cache=True, bos_token_id=5_0_2_5_6, eos_token_id=5_0_2_5_6, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )
class CodeGenOnnxConfig( OnnxConfigWithPast ):
    def __init__( self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past )
        if not getattr(self._config, '''pad_token_id''', None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
    def num_layers( self ):
        return self._config.n_layer
@property
    def num_attention_heads( self ):
        return self._config.n_head
    def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ):
        common_inputs = super(OnnxConfigWithPast, self ).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ):
        return 1_3
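# Example (a minimal sketch; "Salesforce/codegen-350M-mono" is one of the checkpoints listed above):
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(CodeGenConfig(), task="default", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tok, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   sorted(dummy)  # ['attention_mask', 'input_ids', 'past_key_values']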
| 336 | 0 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection
    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
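# Example: circle_sort([5, 2, 9, 1]) returns [1, 2, 5, 9]; the helper keeps
# recursing over halves until a full pass performs no swaps.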
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 160 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg( *,
    aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs, ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs )
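# Example (a minimal sketch): declaring a dataclass field with an alias and help text
#   @dataclasses.dataclass
#   class Args:
#       learning_rate: float = HfArg(default=1e-4, aliases=["--lr"], help="optimizer learning rate")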
class HfArgumentParser( ArgumentParser ):
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser , field ):
        field_name = f'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '__origin__' , field.type )
        if origin_type is Union or (hasattr(types , 'UnionType' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    f' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '__origin__' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '__origin__' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(f'--no_{field.name}' , action='store_false' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ):
        if hasattr(dtype , '_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    f'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ):
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}' )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ):
        with open(Path(json_file ) , encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
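# Example (a minimal sketch):
#   @dataclasses.dataclass
#   class TrainingArgs:
#       epochs: int = 3
#       use_gpu: bool = False
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(args=["--epochs", "5", "--use_gpu"])
#   assert training_args.epochs == 5 and training_args.use_gpu is True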
| 160 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'\s+')
def get_hash(example):
    """Compute the md5 hash of the whitespace-stripped code content."""
    return {"hash": hashlib.md5(re.sub(PATTERN, """""", example["""content"""]).encode("""utf-8""")).hexdigest()}
def line_stats(example):
    """Compute mean and maximum line length of the code content."""
    line_lengths = [len(line) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the code content."""
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """If the example's hash is still in `uniques`, consume it and keep the example."""
    if example["hash"] in uniques:
        uniques.remove(example["""hash"""])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Scan the first `scan_width` lines for auto-generation markers."""
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristically detect configuration files and test files."""
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["""content"""].count("""\n""")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("""config""")
        count_test += line.lower().count("""test""")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Detect files with no Python structure keywords (def/class/for/while)."""
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Detect files with fewer than `minimum` '=' characters."""
    lines = example["""content"""].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("""=""")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character-to-token ratio of the code content."""
    input_ids = tokenizer(example["""content"""], truncation=False)["""input_ids"""]
    ratio = len(example["""content"""]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Run all stats and heuristics on a single example."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Keep the example only if it passes deduplication and every heuristic."""
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Gzip-compress `file_path` and delete the uncompressed original."""
    with open(file_path, """rb""") as f_in:
        with gzip.open(str(file_path) + """.gz""", """wb""", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
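# Example (sketch): compress_file("data/file-000000000001.json") writes
# "data/file-000000000001.json.gz" and removes the uncompressed original.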
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(F"""Size of deduplicate dataset: {len(ds_filter)}""")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 154 |
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices with zero in-degree."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)
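# For the graph below, Kahn's algorithm prints [0, 1, 2, 3, 4, 5].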
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 154 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    @property
    def dummy_uncond_unet( self ):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def dummy_cond_unet( self ):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def get_dummy_components( self , class_cond=False ):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
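    # NOTE: passing an explicit `timesteps` list (with `num_inference_steps=None`) drives the
    # multistep consistency-model sampler through exactly two steps here, t=22 and t=0.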
    def test_consistency_model_pipeline_multistep( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_onestep( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 , get_fixed_latents=False , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        generator = torch.manual_seed(seed )
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs['latents'] = latents
        return inputs
    def get_fixed_latents( self , seed=0 , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
    def test_consistency_model_cd_multistep( self ):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_consistency_model_cd_onestep( self ):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn( self ):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn( self ):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 363 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["""image"""]
    batch_params = ["""image"""]
    required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        return 8
    @property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor( self ):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
        return image_processor
    @property
    def dummy_prior( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
SCREAMING_SNAKE_CASE = PriorTransformer(**lowerCAmelCase__ )
return model
@property
    def dummy_renderer( self ) -> List[Any]:
        torch.manual_seed(0 )
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
return model
    def get_dummy_components( self ) -> Dict:
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
return components
    def get_dummy_inputs( self , device , seed=0 ) -> List[str]:
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
return inputs
    def test_shap_e_img2img( self ) -> List[Any]:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ) -> Union[str, Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ) -> List[str]:
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ) -> List[str]:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img( self ) -> Any:
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 38 | 0 |
from PIL import Image
def change_brightness( img: Image , level: float ):
    '''simple docstring'''

    def brightness( c: int ) -> float:
        # 128 + level + (c - 128) simplifies to c + level; PIL clamps the result to 0-255
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
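`Image.point` with a callable builds a per-band lookup table over 0-255, so the call above is equivalent to precomputing a clamped LUT; a small sketch of that equivalence (the one-table-per-band repetition follows Pillow's convention and is an assumption of this sketch):

from PIL import Image

level = 100
# One clamped 256-entry table, repeated once per band (three times for RGB).
lut = [min(255, max(0, i + level)) for i in range(256)]
with Image.open("image_data/lena.jpg") as img:
    bright = img.point(lut * len(img.getbands()))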
| 12 |
'''simple docstring'''
def sum_digits( num: int ) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution( max_n: int = 1_00 ) -> int:
    # Numerators of the continued-fraction convergents of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 181 | 0 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_SCREAMING_SNAKE_CASE = "."
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
with open(doctest_file_path) as fp:
for line in fp:
_SCREAMING_SNAKE_CASE = line.strip()
_SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_SCREAMING_SNAKE_CASE = "\n".join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 370 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def convert_hubert_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
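A sketch of driving the converter directly and reloading the export; the checkpoint filename and output directory are hypothetical placeholders, and the positional order mirrors the argparse call above.

# Hypothetical paths; any fairseq HuBERT checkpoint and writable directory will do.
convert_hubert_checkpoint(
    "hubert_base_ls960.pt",   # checkpoint_path
    "./hubert-converted",     # pytorch_dump_folder_path
    None,                     # config_path
    None,                     # dict_path
    False,                    # is_finetuned: a base pretraining checkpoint
)
model = HubertModel.from_pretrained("./hubert-converted")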
| 3 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_unispeech"""] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser


def test_command( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
| 106 | 0 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day( year: int , month: int , day: int ) -> str:
assert len(str(lowerCAmelCase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is not a leap year if it is not divisible by 4, or if it is a century
    # year not divisible by 400 (the original `== 0` misclassified years like 2000).
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
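A worked example for 2020-10-24, a Saturday:

# century        = 2020 // 100 = 20 -> century_anchor = (5 * (20 % 4) + 2) % 7 = 2
# centurian      = 20, centurian_m = 20 % 12 = 8
# dooms_day      = (20 // 12 + 8 + 8 // 4 + 2) % 7 = 13 % 7 = 6
# 2020 is a leap year -> day_anchor = DOOMSDAY_LEAP[9] = 3
# week_day       = (6 + 24 - 3) % 7 = 6 -> "Saturday"
assert get_week_day(2020, 10, 24) == "Saturday"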
| 11 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.9_99 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn( t ):
            return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn( t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
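# A quick numerical sanity check (a sketch, not part of the upstream module):
# since beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), the running product of
# (1 - beta_i) telescopes to alpha_bar(t_k) / alpha_bar(0) until the max_beta
# clipping kicks in near t = 1.
if __name__ == "__main__":
    def _alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    _betas = betas_for_alpha_bar(10)
    _alphas_cumprod = torch.cumprod(1.0 - _betas, dim=0)
    assert abs(_alphas_cumprod[4].item() - _alpha_bar(0.5) / _alpha_bar(0.0)) < 1e-4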
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1_000 , beta_start = 0.0_0085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , timestep_spacing = "linspace" , steps_offset = 0 , ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
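The class above lines up structurally with diffusers' KDPM2DiscreteScheduler (the deterministic two-stage k-diffusion sampler); that identification is an assumption read off the method bodies, since the class name itself was mangled. A minimal denoising-loop sketch against that upstream scheduler, with a zero tensor standing in for a real UNet call:

import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=25)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:  # interleaved timesteps: first- and second-order sub-steps
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample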
| 11 | 1 |
def jaro_winkler( stra: str, strb: str ) -> float:
    def get_matched_characters( _stra: str, _strb: str ) -> str:
        matched = []
        limit = min(len(_stra ), len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0, i - limit ) )
            right = int(min(i + limit + 1, len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F'''{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'''
        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(stra, strb )
    matching_b = get_matched_characters(strb, stra )
    match_count = len(matching_a )

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
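A worked check on the classic example: "martha" vs "marhta" has 6 matches, 1 transposition (the t/h swap), and a common prefix of 3, so jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444 and jaro_winkler ≈ 0.9444 + 0.1 * 3 * (1 - 0.9444) ≈ 0.9611.

assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611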
| 287 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__ = "cpu", __magic_name__ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
UpperCamelCase__ : List[str] = device
UpperCamelCase__ : Union[str, Any] = CLIPTokenizerFast.from_pretrained(__magic_name__ )
UpperCamelCase__ : Tuple = [0.4814_5466, 0.457_8275, 0.4082_1073]
UpperCamelCase__ : Union[str, Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
UpperCamelCase__ : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
UpperCamelCase__ : List[str] = torchvision.transforms.Resize(224 )
UpperCamelCase__ : Union[str, Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase__ ( self, __magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.resize(__magic_name__ )
UpperCamelCase__ : Dict = self.center_crop(__magic_name__ )
UpperCamelCase__ : List[str] = self.normalize(__magic_name__ )
return images
def __call__( self, __magic_name__=None, __magic_name__=None, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.tokenizer(text=__magic_name__, **__magic_name__ )
UpperCamelCase__ : List[Any] = self.preprocess_img(__magic_name__ )
UpperCamelCase__ : Optional[Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self, __magic_name__=10, __magic_name__=0.01, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=False, __magic_name__=True, __magic_name__="image", __magic_name__=True, __magic_name__=False, __magic_name__=False, __magic_name__=False, ) -> None:
"""simple docstring"""
super().__init__()
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Tuple = device if device else get_device()
if vqgan:
UpperCamelCase__ : Union[str, Any] = vqgan
else:
UpperCamelCase__ : Any = load_vqgan(self.device, conf_path=__magic_name__, ckpt_path=__magic_name__ )
self.vqgan.eval()
if clip:
UpperCamelCase__ : Optional[Any] = clip
else:
UpperCamelCase__ : Any = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
UpperCamelCase__ : str = ProcessorGradientFlow(device=self.device )
UpperCamelCase__ : Union[str, Any] = iterations
UpperCamelCase__ : Tuple = lr
UpperCamelCase__ : Optional[int] = log
UpperCamelCase__ : List[Any] = make_grid
UpperCamelCase__ : Optional[Any] = return_val
UpperCamelCase__ : str = quantize
UpperCamelCase__ : int = self.vqgan.decoder.z_shape
def UpperCamelCase__ ( self, __magic_name__=None, __magic_name__=None, __magic_name__=5, __magic_name__=True ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = []
if output_path is None:
UpperCamelCase__ : List[str] = '''./animation.gif'''
if input_path is None:
UpperCamelCase__ : Union[str, Any] = self.save_path
UpperCamelCase__ : Tuple = sorted(glob(input_path + '''/*''' ) )
if not len(__magic_name__ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(__magic_name__ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
UpperCamelCase__ : Dict = total_duration / len(__magic_name__ )
UpperCamelCase__ : List[Any] = [frame_duration] * len(__magic_name__ )
if extend_frames:
UpperCamelCase__ : List[Any] = 1.5
UpperCamelCase__ : Any = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(__magic_name__ ) )
imageio.mimsave(__magic_name__, __magic_name__, duration=__magic_name__ )
print(f"gif saved to {output_path}" )
def UpperCamelCase__ ( self, __magic_name__=None, __magic_name__=None ) -> Any:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
UpperCamelCase__ : List[Any] = preprocess(Image.open(__magic_name__ ), target_image_size=256 ).to(self.device )
UpperCamelCase__ : str = preprocess_vqgan(__magic_name__ )
UpperCamelCase__ ,*UpperCamelCase__ : Union[str, Any] = self.vqgan.encode(__magic_name__ )
return z
def UpperCamelCase__ ( self, __magic_name__ ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.latent.detach().requires_grad_()
UpperCamelCase__ : Any = base_latent + transform_vector
if self.quantize:
UpperCamelCase__ ,*UpperCamelCase__ : int = self.vqgan.quantize(__magic_name__ )
else:
UpperCamelCase__ : Optional[int] = trans_latent
return self.vqgan.decode(__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=None ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.clip_preprocessor(text=__magic_name__, images=__magic_name__, return_tensors='''pt''', padding=__magic_name__ )
UpperCamelCase__ : Optional[int] = self.clip(**__magic_name__ )
UpperCamelCase__ : Tuple = clip_outputs.logits_per_image
if weights is not None:
UpperCamelCase__ : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] = self._get_clip_similarity(pos_prompts['''prompts'''], __magic_name__, weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
UpperCamelCase__ : Tuple = self._get_clip_similarity(neg_prompts['''prompts'''], __magic_name__, weights=neg_prompts['''weights'''] )
else:
UpperCamelCase__ : Optional[int] = torch.tensor([1], device=self.device )
UpperCamelCase__ : Tuple = -torch.log(__magic_name__ ) + torch.log(__magic_name__ )
return loss
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = torch.randn_like(self.latent, requires_grad=__magic_name__, device=self.device )
UpperCamelCase__ : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCamelCase__ : Tuple = self._add_vector(__magic_name__ )
UpperCamelCase__ : Any = loop_post_process(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self._get_CLIP_loss(__magic_name__, __magic_name__, __magic_name__ )
print('''CLIP loss''', __magic_name__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=__magic_name__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> List[str]:
"""simple docstring"""
wandb.init(reinit=__magic_name__, project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
UpperCamelCase__ : List[str] = Image.open(__magic_name__ )
UpperCamelCase__ : List[Any] = image.resize((256, 256) )
wandb.log('''Original Image''', wandb.Image(__magic_name__ ) )
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
if not prompts:
return []
UpperCamelCase__ : int = []
UpperCamelCase__ : str = []
if isinstance(__magic_name__, __magic_name__ ):
UpperCamelCase__ : Optional[Any] = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(__magic_name__, (tuple, list) ):
UpperCamelCase__ : Optional[int] = prompt[0]
UpperCamelCase__ : Dict = float(prompt[1] )
elif ":" in prompt:
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = prompt.split(''':''' )
UpperCamelCase__ : List[Any] = float(__magic_name__ )
else:
UpperCamelCase__ : List[str] = prompt
UpperCamelCase__ : Any = 1.0
processed_prompts.append(__magic_name__ )
weights.append(__magic_name__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__magic_name__, device=self.device ),
}
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=None, __magic_name__=None, __magic_name__=True, __magic_name__=False, __magic_name__=True, __magic_name__=True, __magic_name__=None, ) -> str:
"""simple docstring"""
if image_path:
UpperCamelCase__ : Union[str, Any] = self._get_latent(__magic_name__ )
else:
UpperCamelCase__ : Dict = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(__magic_name__, __magic_name__, __magic_name__ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCamelCase__ : Optional[Any] = self.process_prompts(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self.process_prompts(__magic_name__ )
if save_final and save_path is None:
UpperCamelCase__ : str = os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(__magic_name__ ):
os.makedirs(__magic_name__ )
else:
UpperCamelCase__ : int = save_path + '''_''' + get_timestamp()
os.makedirs(__magic_name__ )
UpperCamelCase__ : Optional[Any] = save_path
UpperCamelCase__ : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(__magic_name__ ) )
UpperCamelCase__ : Optional[Any] = loop_post_process(__magic_name__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(__magic_name__, __magic_name__, __magic_name__ ) ):
if show_intermediate:
show_pil(__magic_name__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(__magic_name__ )} )
if show_final:
show_pil(__magic_name__ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 201 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Tuple = IFInpaintingPipeline
a : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
a : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a : Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self ) -> Union[str, Any]:
'''simple docstring'''
return self._get_dummy_components()
    def get_dummy_inputs(self ,device ,seed=0 ) -> Any:
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''' )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_local()
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
| 358 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_fast'''] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot'''] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot'''] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot'''] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
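For context, the deferred-import machinery can be approximated with PEP 562 module-level __getattr__; a minimal sketch (not transformers' actual _LazyModule implementation):

import importlib

_import_structure = {"tokenization_blenderbot": ["BlenderbotTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the submodule only when one of its attributes is first accessed.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")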
| 217 | 0 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : Optional[Any] ) -> Dict:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Optional[int] ) -> Any:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def A ( self : Tuple , _a : str , _a : str , _a : Optional[int] , _a : List[str] , _a : Dict , _a : Union[str, Any] , _a : int ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BioGptModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , _a : List[str] , _a : Optional[Any] , _a : str , _a : Optional[int] , _a : Optional[Any] , _a : Dict , _a : Optional[int] , _a : List[Any] , _a : str , ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BioGptForCausalLM(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : int , _a : Dict , _a : Tuple , _a : List[str] , _a : str , _a : Optional[int] , *_a : str ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BioGptModel(config=_a )
model.to(_a )
model.eval()
# create attention mask
_SCREAMING_SNAKE_CASE =torch.ones(input_ids.shape , dtype=torch.long , device=_a )
_SCREAMING_SNAKE_CASE =self.seq_length // 2
_SCREAMING_SNAKE_CASE =0
# first forward pass
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a ).to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_SCREAMING_SNAKE_CASE =ids_tensor((1,) , _a ).item() + 1
_SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_SCREAMING_SNAKE_CASE =random_other_next_tokens
# append to next input_ids and attn_mask
_SCREAMING_SNAKE_CASE =torch.cat([input_ids, next_tokens] , dim=-1 )
_SCREAMING_SNAKE_CASE =torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_a )] , dim=1 , )
# get two different outputs
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )['last_hidden_state']
_SCREAMING_SNAKE_CASE =model(_a , past_key_values=_a , attention_mask=_a )['last_hidden_state']
# select random slice
_SCREAMING_SNAKE_CASE =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_SCREAMING_SNAKE_CASE =output_from_no_past[:, -1, random_slice_idx].detach()
_SCREAMING_SNAKE_CASE =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def A ( self : Any , _a : Dict , _a : List[Any] , _a : str , _a : List[str] , _a : Any , *_a : List[str] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BioGptModel(config=_a ).to(_a ).eval()
_SCREAMING_SNAKE_CASE =torch.ones(input_ids.shape , dtype=torch.long , device=_a )
# first forward pass
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , use_cache=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_SCREAMING_SNAKE_CASE =torch.cat([input_ids, next_tokens] , dim=-1 )
_SCREAMING_SNAKE_CASE =torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )['last_hidden_state']
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , past_key_values=_a )[
'last_hidden_state'
]
# select random slice
_SCREAMING_SNAKE_CASE =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_SCREAMING_SNAKE_CASE =output_from_no_past[:, -3:, random_slice_idx].detach()
_SCREAMING_SNAKE_CASE =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def A ( self : Optional[int] , _a : Dict , _a : Any , _a : List[Any] , _a : Tuple , _a : Dict , *_a : Optional[Any] , _a : int=False ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BioGptForCausalLM(_a )
model.to(_a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def A ( self : int , _a : Dict , *_a : str ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BioGptModel(_a )
_SCREAMING_SNAKE_CASE =model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def A ( self : Optional[int] , _a : Dict , _a : int , _a : Tuple , _a : Union[str, Any] , _a : Optional[int] , *_a : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BioGptForTokenClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : int ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Use the EOS token as the PAD token so left-padded batches can be generated
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 47 |
'''simple docstring'''
class MaxFenwickTree:
    """A Fenwick tree (binary indexed tree) variant supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] to value and repair every tree node covering it. O(log^2 N)."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute the maximum over the rest of the segment covered by this node
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return max(arr[left:right]); `right` is exclusive. O(log N)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
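
# A minimal usage sketch (illustrative values; note that `query`'s `right` bound
# is exclusive, so query(0, 5) covers arr[0:5]):
#
#     tree = MaxFenwickTree(5)
#     tree.update(4, 100)
#     tree.query(0, 5)   # -> 100
#     tree.update(4, 0)
#     tree.update(2, 20)
#     tree.query(0, 5)   # -> 20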
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 | 1 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and on free cells)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the meeting point, which fwd_path already contains
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 125 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav, max_length, sample_rate=16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
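
# For example, with sample_rate=16_000 and max_length=2.0 the target window is
# 32_000 samples: a clip shorter than that is returned unchanged, a longer one
# yields a random contiguous 32_000-sample slice.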
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
"""simple docstring"""
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
_SCREAMING_SNAKE_CASE = DatasetDict()
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_SCREAMING_SNAKE_CASE = raw_datasets.cast_column(
data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_SCREAMING_SNAKE_CASE = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features[data_args.label_column_name].names
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = {}, {}
for i, label in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = str(snake_case__ )
_SCREAMING_SNAKE_CASE = label
    # Load the accuracy metric from the evaluate package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(snake_case__ ) ,labelaid=snake_case__ ,idalabel=snake_case__ ,finetuning_task="""audio-classification""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
_SCREAMING_SNAKE_CASE = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=snake_case__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case__ ,output_all_columns=snake_case__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case__ ,output_all_columns=snake_case__ )
# Initialize our trainer
_SCREAMING_SNAKE_CASE = Trainer(
model=snake_case__ ,args=snake_case__ ,train_dataset=raw_datasets["""train"""] if training_args.do_train else None ,eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None ,compute_metrics=snake_case__ ,tokenizer=snake_case__ ,)
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE = last_checkpoint
_SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics("""train""" ,train_result.metrics )
trainer.save_metrics("""train""" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("""eval""" ,snake_case__ )
trainer.save_metrics("""eval""" ,snake_case__ )
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main()
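
# A typical invocation, with illustrative arguments (the dataset/model names and
# hyper-parameters below are examples, not requirements of this script):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb \
#       --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train \
#       --do_eval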
| 125 | 1 |
"""simple docstring"""
import os
def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
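
# Worked example of the row-by-row update: for the triangle
#     3
#     7 4
#     2 4 6
# the second row becomes [10, 7] and the third [12, 14, 13], so the maximum
# top-to-bottom path sum is max(a[-1]) = 14 (via 3 -> 7 -> 4).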
if __name__ == "__main__":
print(solution())
| 44 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
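
# A minimal usage sketch (illustrative override values):
#
#     config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#     config.model_type  # -> "lilt"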
| 172 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 357 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 201 | 0 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Return the most likely Caesar shift, its chi-squared statistic, and the decoded text."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.08_497,
'''b''': 0.01_492,
'''c''': 0.02_202,
'''d''': 0.04_253,
'''e''': 0.11_162,
'''f''': 0.02_228,
'''g''': 0.02_015,
'''h''': 0.06_094,
'''i''': 0.07_546,
'''j''': 0.00_153,
'''k''': 0.01_292,
'''l''': 0.04_025,
'''m''': 0.02_406,
'''n''': 0.06_749,
'''o''': 0.07_507,
'''p''': 0.01_929,
'''q''': 0.00_095,
'''r''': 0.07_587,
'''s''': 0.06_327,
'''t''': 0.09_356,
'''u''': 0.02_758,
'''v''': 0.00_978,
'''w''': 0.02_560,
'''x''': 0.00_150,
'''y''': 0.01_994,
'''z''': 0.00_077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
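
# A minimal usage sketch (ROT13-encoded input; the exact chi-squared score
# depends on the frequency table):
#
#     best_shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("uryyb jbeyq")
#     # -> likely (13, <chi-squared score>, "hello world")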
| 179 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """
    A zero-shot text-classification tool built on BART-large-MNLI.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
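
# A minimal usage sketch (assumes the `transformers` Tools runtime; the label
# set is illustrative):
#
#     classifier = TextClassificationTool()
#     classifier("This is a super nice API!", labels=["positive", "negative"])
#     # -> "positive" (the most likely label)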
| 179 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 228 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
            \'matthews_correlation\': Matthews Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    # Group answer predictions by their (paragraph, question) pair, then score
    # per-question macro-F1 / exact match and an answer-level F1.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            ) | 228 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
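# Added note (illustrative, not part of the test file): end to end, the
# processor under test is used like
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
# which yields exactly the pixel_values/input_ids/attention_mask keys checked above.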
| 26 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: try every first cut; exponential time."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming; O(n^2) time."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming; O(n^2) time."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
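# Added sketch (not in the original module): the tables above only return the
# maximum revenue. Recording the first piece of an optimal solution per
# sub-length also lets us recover the actual cut lengths.
def bottom_up_cut_rod_with_cuts(n: int, prices: list):
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0
    best_piece = [0] * (n + 1)  # first piece cut off in an optimal solution
    for i in range(1, n + 1):
        for j in range(1, i + 1):
            if prices[j - 1] + max_rev[i - j] > max_rev[i]:
                max_rev[i] = prices[j - 1] + max_rev[i - j]
                best_piece[i] = j
    cuts, remaining = [], n
    while remaining > 0:
        cuts.append(best_piece[remaining])
        remaining -= best_piece[remaining]
    # For the prices in main(), returns (36, [1, 1, 1, 1, 1, 1]).
    return max_rev[n], cuts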
| 26 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
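# Added note (illustrative, not part of the script): get_tfds expects CSV files
# whose header row names the columns, e.g.
#   label,sentence
#   positive,"a great movie"
# with label_column_id selecting the label column by position; the remaining
# one or two columns become the (pair of) text inputs.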
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
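# Added example (illustrative command; the file names are placeholders and the
# flags map to the dataclass fields above plus transformers' TFTrainingArguments):
#   python run_tf_text_classification.py \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --model_name_or_path bert-base-cased \
#       --output_dir model --do_train --do_eval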
| 90 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
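# FocalNetModelTester follows the shared transformers tester pattern:
# prepare_config_and_inputs() builds a tiny random config and batch, and each
# create_and_check_* method asserts the output shapes of one model head.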
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
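# The actual backbone assertions (channel counts, out_features/out_indices
# consistency, feature-map shapes) come from BackboneTesterMixin, so this
# class only needs to supply a model tester.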
| 90 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested'
    )
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested'
    )
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which would make the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
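# Added illustration (not part of the original conftest): with the checker
# above installed, a doctest can opt out of output comparison via the custom
# flag, using the standard doctest option syntax, e.g.
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
#   0.123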
| 56 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
class a ( unittest.TestCase ):
@slow
def A_ ( self : List[Any] ):
snake_case_ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
snake_case_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case_ = model(lowercase_ )[0]
snake_case_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase_ )
snake_case_ = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
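# Added note (illustrative command; the path is the conventional location of
# this test in the transformers repo):
#   pytest tests/models/mpnet/test_modeling_mpnet.py -k "MPNetModelTest" -v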
| 56 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
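
# Standalone sketch of the integration check above, outside unittest. The
# "facebook/regnet-y-040" checkpoint is an assumed example; any TF RegNet
# image-classification checkpoint should work the same way.
if __name__ == "__main__":
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs, training=False).logits
    # argmax over the 1000 ImageNet classes
    print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])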
| 350 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        """Forwards all arguments to the image processor's post_process."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards all arguments to the image processor's post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards all arguments to the image processor's post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
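
# Minimal usage sketch for the processor above, assuming the public
# "google/owlvit-base-patch32" checkpoint and its detection head:
if __name__ == "__main__":
    import requests
    import torch
    from PIL import Image
    from transformers import OwlViTForObjectDetection

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    outputs = model(**inputs)
    # rescale the predicted boxes back to the original image size
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)
    print(results[0]["scores"], results[0]["labels"])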
| 208 | 0 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
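
# Quick demonstration on a hand-built tree: 2.1 sits between its ancestors 2.0
# and 3.0, so the first tree is a valid BST; the second violates the ordering.
if __name__ == "__main__":
    good = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0, TreeNode(2.1)))
    print(is_binary_search_tree(good))  # True
    bad = TreeNode(2.0, TreeNode(3.0))  # left child greater than its parent
    print(is_binary_search_tree(bad))  # False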
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
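
# Sketch of the processor in actual use, assuming the same "laion/clap-htsat-unfused"
# checkpoint as the tests above (audio must be a 48 kHz mono waveform):
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapModel

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
    audio = np.zeros(48_000, dtype=np.float32)  # one second of silence as a stand-in
    inputs = processor(text=["a dog barking", "silence"], audios=audio, return_tensors="pt", sampling_rate=48_000)
    outputs = model(**inputs)
    print(outputs.logits_per_audio.softmax(dim=-1))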
| 294 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
    testmod()
 | 307 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
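
# Before the test class, a quick sketch of how this scheduler is used in practice:
# it drops into any diffusers pipeline via from_config. The checkpoint name is an
# assumed example, not something the tests depend on.
#
#   from diffusers import StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]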
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
 | 307 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False,
                 bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]",
                 sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
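
# Round-trip sketch, assuming the standard "albert-base-v2" checkpoint:
if __name__ == "__main__":
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    ids = tokenizer("Hello, how are you?").input_ids
    print(tokenizer.convert_ids_to_tokens(ids))
    print(tokenizer.decode(ids, skip_special_tokens=True))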
| 43 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
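
# Quick sketch of the two classes above: build a config, then inspect the ONNX
# input axes it declares (sizes here are arbitrary small values for illustration).
if __name__ == "__main__":
    config = BertConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4)
    onnx_config = BertOnnxConfig(config)
    print(onnx_config.inputs)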
| 3 | 0 |
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
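
# Worked examples: 4 m -> km divides by 10**3; 1 Gm -> m multiplies by 10**9.
if __name__ == "__main__":
    print(length_conversion(4, "meter", "kilometer"))  # 0.004
    print(length_conversion(1, "gigametre", "meter"))  # 1000000000.0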
if __name__ == "__main__":
from doctest import testmod
testmod()
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
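
# Quick sketch of the config in use; the values here are just for illustration and
# mirror the "microsoft/cvt-13" entry in the archive map above.
if __name__ == "__main__":
    config = CvtConfig(num_channels=3, embed_dim=[64, 192, 384])
    print(config.model_type, config.depth)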
| 341 | 0 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given Gregorian date, via the Doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
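
# Spot checks: 2000-01-01 exercises the century/leap-year branch (2000 is
# divisible by 400, so January's doomsday anchor is the 4th, not the 3rd).
if __name__ == "__main__":
    print(get_week_day(2000, 1, 1))  # Saturday
    print(get_week_day(2021, 1, 8))  # Friday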
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
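
# With the lazy module in place, end users import directly from transformers. A
# minimal text-to-speech sketch (assumes the "microsoft/speecht5_tts" checkpoint
# and a pre-loaded speaker_embeddings tensor of shape (1, 512)):
#
#   from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
#   vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
#   inputs = processor(text="Hello, world!", return_tensors="pt")
#   speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)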
| 11 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = KandinskyImgaImgPipeline
A__ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
A__ = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
A__ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
A__ = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase__ = """A red cartoon frog, 4k"""
lowercase__ = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCAmelCase )
lowercase__ = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
lowercase__ = pipeline.to(_UpperCAmelCase )
pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase__ , lowercase__ = pipe_prior(
_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase__ = pipeline(
_UpperCAmelCase , image=_UpperCAmelCase , image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
lowercase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
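
# The slow test above doubles as end-user documentation: the prior pipeline maps
# the prompt to image embeddings, which the img2img pipeline then decodes against
# the init image. A trimmed-down version of the same flow (init_image assumed to
# be a pre-loaded PIL image):
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()
#   out = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
#              negative_image_embeds=negative_embeds, strength=0.2)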
| 361 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
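
# The hard-coded ids above correspond to "My dog is cute"; with a tokenizer the
# same check reads as follows (uses the public "xlm-roberta-base" vocabulary):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
    model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
    features = tokenizer("My dog is cute", return_tensors="tf")
    print(model(features)["last_hidden_state"].shape)  # (1, 6, 768)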
| 146 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5,
                num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
def _lowercase ( self : Union[str, Any] ) -> str:
_a : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_a : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
_a : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_a : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _a : Any = pipe(UpperCAmelCase__ , """anime turtle""" , generator=UpperCAmelCase__ , output_type="""np""" )
_a : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
_a : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
_a : List[str] = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_a : int = pipe(
UpperCAmelCase__ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
_a : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 294 |
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
'''simple docstring'''
_a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("""All input parameters must be positive""" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("""Relative densities cannot be greater than one""" )
else:
_a : Tuple = 1 - (matter_density + radiation_density + dark_energy)
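        # Friedmann equation: E(z)^2 = omega_radiation * (1+z)^4 + omega_matter * (1+z)^3
        #                              + omega_curvature * (1+z)^2 + omega_dark_energy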
_a : int = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
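        # Hubble parameter: H(z) = H0 * E(z), where e_a above is E(z)^2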
_a : List[str] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_snake_case = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 294 | 1 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def lowerCAmelCase( __lowerCamelCase ):
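    # pick the temporal UNet block layout for the given planning horizon (hor)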
if hor == 128:
__a = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
__a = (32, 128, 256)
__a = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
__a = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
__a = (32, 64, 128, 256)
__a = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
__a = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
__a = model.state_dict()
__a = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_5536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
__a = UNetaDModel(**__lowerCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
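    # map checkpoint keys to diffusers keys positionally; this assumes both
    # state dicts enumerate their parameters in the same order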
__a = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__a = state_dict.pop(__lowerCamelCase )
hf_value_function.load_state_dict(__lowerCamelCase )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase( ):
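    # the value function shares the temporal UNet backbone but downsamples at
    # every block and ends in a scalar ValueFunction output head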
__a = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_5536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
__a = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
__a = model
__a = UNetaDModel(**__lowerCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
__a = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__a = state_dict.pop(__lowerCamelCase )
hf_value_function.load_state_dict(__lowerCamelCase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 197 |
from collections import defaultdict
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
__a = first_str.lower().strip()
__a = second_str.lower().strip()
# Remove whitespace
__a = first_str.replace(' ' , '' )
__a = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
return False
# Default values for count should be 0
__a = defaultdict(__lowerCamelCase )
    # For each character in the input strings, increment the count
    # for the first string and decrement it for the second
for i in range(len(__lowerCamelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCamelCase_ : List[str] = input("""Enter the first string """).strip()
lowerCamelCase_ : Optional[Any] = input("""Enter the second string """).strip()
lowerCamelCase_ : str = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 197 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
snake_case_ : Any = TypeVar("KT")
snake_case_ : Optional[int] = TypeVar("VT")
class __a (Generic[KT, VT] ):
def __init__( self : str , __magic_name__ : KT | str = "root" , __magic_name__ : VT | None = None ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Tuple = key
UpperCAmelCase_ : int = value
UpperCAmelCase_ : list[Node[KT, VT]] = []
def __repr__( self : List[Any] ) -> str:
"""simple docstring"""
return F"""Node({self.key}: {self.value})"""
@property
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.forward )
class __a (Generic[KT, VT] ):
def __init__( self : Optional[int] , __magic_name__ : float = 0.5 , __magic_name__ : int = 16 ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Node[KT, VT] = Node[KT, VT]()
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Optional[Any] = p
UpperCAmelCase_ : Tuple = max_level
def __str__( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = list(self )
if len(__magic_name__ ) == 0:
return F"""SkipList(level={self.level})"""
UpperCAmelCase_ : List[Any] = max((len(str(__magic_name__ ) ) for item in items) , default=4 )
UpperCAmelCase_ : int = max(__magic_name__ , 4 ) + 4
UpperCAmelCase_ : Dict = self.head
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[Any] = node.forward.copy()
lines.append(F"""[{node.key}]""".ljust(__magic_name__ , '''-''' ) + '''* ''' * len(__magic_name__ ) )
lines.append(''' ''' * label_size + '''| ''' * len(__magic_name__ ) )
while len(node.forward ) != 0:
UpperCAmelCase_ : Dict = node.forward[0]
lines.append(
F"""[{node.key}]""".ljust(__magic_name__ , '''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(__magic_name__ ) )
UpperCAmelCase_ : int = node.forward
lines.append('''None'''.ljust(__magic_name__ ) + '''* ''' * len(__magic_name__ ) )
return F"""SkipList(level={self.level})\n""" + "\n".join(__magic_name__ )
def __iter__( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
UpperCAmelCase_ : Any = node.forward[0]
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = 1
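        # each extra level is added with probability p, so node levels follow
        # a geometric distribution capped at max_level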
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : str ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
"""simple docstring"""
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Tuple = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
UpperCAmelCase_ : str = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__magic_name__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self : Dict , __magic_name__ : KT ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._locate_node(__magic_name__ )
if node is not None:
for i, update_node in enumerate(__magic_name__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
UpperCAmelCase_ : int = node.forward[i]
else:
UpperCAmelCase_ : Any = update_node.forward[:i]
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : KT , __magic_name__ : VT ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self._locate_node(__magic_name__ )
if node is not None:
UpperCAmelCase_ : Dict = value
else:
UpperCAmelCase_ : Dict = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __magic_name__ ):
update_vector.append(self.head )
UpperCAmelCase_ : Optional[int] = level
UpperCAmelCase_ : List[Any] = Node(__magic_name__ , __magic_name__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__magic_name__ )
else:
UpperCAmelCase_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self : Dict , __magic_name__ : VT ) -> VT | None:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._locate_node(__magic_name__ )
if node is not None:
return node.value
return None
def lowerCamelCase_ ( ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = SkipList()
skip_list.insert('''Key1''', 3 )
skip_list.insert('''Key2''', 12 )
skip_list.insert('''Key3''', 41 )
skip_list.insert('''Key4''', -19 )
UpperCAmelCase_ : Dict = skip_list.head
UpperCAmelCase_ : List[Any] = {}
while node.level != 0:
UpperCAmelCase_ : int = node.forward[0]
UpperCAmelCase_ : List[Any] = node.value
assert len(SCREAMING_SNAKE_CASE__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowerCamelCase_ ( ) -> Tuple:
UpperCAmelCase_ : List[str] = SkipList()
skip_list.insert('''Key1''', 10 )
skip_list.insert('''Key1''', 12 )
skip_list.insert('''Key5''', 7 )
skip_list.insert('''Key7''', 10 )
skip_list.insert('''Key10''', 5 )
skip_list.insert('''Key7''', 7 )
skip_list.insert('''Key5''', 5 )
skip_list.insert('''Key10''', 10 )
UpperCAmelCase_ : str = skip_list.head
UpperCAmelCase_ : Optional[int] = {}
while node.level != 0:
UpperCAmelCase_ : int = node.forward[0]
UpperCAmelCase_ : str = node.value
if len(SCREAMING_SNAKE_CASE__ ) != 4:
print()
assert len(SCREAMING_SNAKE_CASE__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowerCamelCase_ ( ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = SkipList()
assert skip_list.find('''Some key''' ) is None
def lowerCamelCase_ ( ) -> Dict:
UpperCAmelCase_ : int = SkipList()
skip_list.insert('''Key2''', 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''', 10 )
skip_list.insert('''Key2''', 8 )
skip_list.insert('''V''', 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def lowerCamelCase_ ( ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def lowerCamelCase_ ( ) -> Any:
UpperCAmelCase_ : Any = SkipList()
skip_list.insert('''Key1''', 12 )
skip_list.insert('''V''', 13 )
skip_list.insert('''X''', 14 )
skip_list.insert('''Key2''', 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = SkipList()
skip_list.insert('''Key1''', 12 )
skip_list.insert('''V''', 13 )
skip_list.insert('''X''', 14 )
skip_list.insert('''Key2''', 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def lowerCamelCase_ ( ) -> Any:
UpperCAmelCase_ : Optional[Any] = SkipList()
skip_list.insert('''Key1''', 12 )
skip_list.insert('''V''', 13 )
skip_list.insert('''X''', 142 )
skip_list.insert('''Key2''', 15 )
skip_list.delete('''X''' )
def traverse_keys(SCREAMING_SNAKE_CASE__ : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(SCREAMING_SNAKE_CASE__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowerCamelCase_ ( ) -> Optional[Any]:
def is_sorted(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return all(next_item >= item for item, next_item in zip(SCREAMING_SNAKE_CASE__, lst[1:] ) )
UpperCAmelCase_ : int = SkipList()
for i in range(10 ):
skip_list.insert(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
skip_list.insert(-12, -12 )
skip_list.insert(77, 77 )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
def lowerCamelCase_ ( ) -> Optional[Any]:
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowerCamelCase_ ( ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = SkipList()
skip_list.insert(2, '''2''' )
skip_list.insert(4, '''4''' )
skip_list.insert(6, '''4''' )
skip_list.insert(4, '''5''' )
skip_list.insert(8, '''4''' )
skip_list.insert(9, '''4''' )
skip_list.delete(4 )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 125 |
'''simple docstring'''
snake_case_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 125 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : List[str] = logging.get_logger(__name__)
__A : Optional[int] = {'vocab_file': 'spiece.model'}
__A : Union[str, Any] = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<sep>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> None:
lowerCamelCase_ =AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =3
lowerCamelCase_ =do_lower_case
lowerCamelCase_ =remove_space
lowerCamelCase_ =keep_accents
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCamelCase_ =jieba
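        # CPM replaces spaces and newlines with the visible placeholders
        # U+2582 and U+2583 before tokenization (reversed again in _decode)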
lowerCamelCase_ =str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _snake_case ( self )-> Tuple:
return len(self.sp_model )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ ={self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self )-> str:
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[Any]:
if self.remove_space:
lowerCamelCase_ =""" """.join(inputs.strip().split() )
else:
lowerCamelCase_ =inputs
lowerCamelCase_ =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCamelCase_ =unicodedata.normalize("""NFKD""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ ="""""".join([c for c in outputs if not unicodedata.combining(_SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
lowerCamelCase_ =outputs.lower()
return outputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ =self.preprocess_text(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =[]
for piece in pieces:
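            # re-split pieces ending in "<digit>," so the number and the comma
            # become separate tokens (same handling as the XLNet tokenizer)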
if len(_SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase_ =self.sp_model.EncodeAsPieces(piece[:-1].replace(_SCREAMING_SNAKE_CASE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ =cur_pieces[1:]
else:
lowerCamelCase_ =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_SCREAMING_SNAKE_CASE )
else:
new_pieces.append(_SCREAMING_SNAKE_CASE )
return new_pieces
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Dict:
return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Any:
return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> int:
lowerCamelCase_ ="""""".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ =os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Union[str, Any]:
lowerCamelCase_ =super()._decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 351 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__A : int = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__A : Any = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__A : Union[str, Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE ( datasets.Metric):
def _snake_case ( self )-> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="auto" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=500 , _SCREAMING_SNAKE_CASE="gpt2-large" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=25 , )-> List[str]:
lowerCamelCase_ =compute_mauve(
p_text=_SCREAMING_SNAKE_CASE , q_text=_SCREAMING_SNAKE_CASE , p_features=_SCREAMING_SNAKE_CASE , q_features=_SCREAMING_SNAKE_CASE , p_tokens=_SCREAMING_SNAKE_CASE , q_tokens=_SCREAMING_SNAKE_CASE , num_buckets=_SCREAMING_SNAKE_CASE , pca_max_data=_SCREAMING_SNAKE_CASE , kmeans_explained_var=_SCREAMING_SNAKE_CASE , kmeans_num_redo=_SCREAMING_SNAKE_CASE , kmeans_max_iter=_SCREAMING_SNAKE_CASE , featurize_model_name=_SCREAMING_SNAKE_CASE , device_id=_SCREAMING_SNAKE_CASE , max_text_length=_SCREAMING_SNAKE_CASE , divergence_curve_discretization_size=_SCREAMING_SNAKE_CASE , mauve_scaling_factor=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE , )
return out
| 49 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( _lowercase ):
a = (DDPMParallelScheduler,)
def lowerCamelCase_ ( self: Union[str, Any] , **UpperCamelCase__: str ):
lowerCamelCase__ : str = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**UpperCamelCase__ )
return config
def lowerCamelCase_ ( self: Tuple ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.check_over_configs(thresholding=UpperCamelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , )
def lowerCamelCase_ ( self: str ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCamelCase__ )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : int = self.scheduler_classes[0]
lowerCamelCase__ : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase__ : Union[str, Any] = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : int = self.scheduler_classes[0]
lowerCamelCase__ : List[Any] = self.get_scheduler_config()
lowerCamelCase__ : List[str] = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ : str = len(UpperCamelCase__ )
lowerCamelCase__ : str = self.dummy_model()
lowerCamelCase__ : int = self.dummy_sample_deter
lowerCamelCase__ : Optional[int] = self.dummy_sample_deter + 0.1
lowerCamelCase__ : Optional[int] = self.dummy_sample_deter - 0.1
lowerCamelCase__ : Union[str, Any] = samplea.shape[0]
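        # stack the three perturbed samples and their per-sample timesteps so the
        # scheduler can process them as one flattened batch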
lowerCamelCase__ : Union[str, Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCamelCase__ : str = torch.arange(UpperCamelCase__ )[0:3, None].repeat(1 , UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCamelCase__ : Dict = scheduler.batch_step_no_noise(UpperCamelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
lowerCamelCase__ : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : List[Any] = self.scheduler_classes[0]
lowerCamelCase__ : Dict = self.get_scheduler_config()
lowerCamelCase__ : List[Any] = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = self.dummy_model()
lowerCamelCase__ : int = self.dummy_sample_deter
lowerCamelCase__ : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase__ ) ):
# 1. predict noise residual
lowerCamelCase__ : Dict = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : Any = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowerCamelCase__ : List[str] = pred_prev_sample
lowerCamelCase__ : List[Any] = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase__ : Any = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase__ : Any = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ : int = len(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = self.dummy_model()
lowerCamelCase__ : List[Any] = self.dummy_sample_deter
lowerCamelCase__ : List[str] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase__ ) ):
# 1. predict noise residual
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowerCamelCase__ : List[Any] = pred_prev_sample
lowerCamelCase__ : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Dict = self.scheduler_classes[0]
lowerCamelCase__ : List[str] = self.get_scheduler_config()
lowerCamelCase__ : Optional[int] = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
lowerCamelCase__ : Any = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase__ ):
if i == len(UpperCamelCase__ ) - 1:
lowerCamelCase__ : List[str] = -1
else:
lowerCamelCase__ : int = timesteps[i + 1]
lowerCamelCase__ : List[Any] = scheduler.previous_timestep(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = prev_t.item()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCamelCase__ : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase__ : Any = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCamelCase__ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Tuple = self.scheduler_classes[0]
lowerCamelCase__ : Dict = self.get_scheduler_config()
lowerCamelCase__ : str = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ : Dict = [100, 87, 50, 1, 0]
lowerCamelCase__ : List[str] = len(UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase__ : Tuple = self.get_scheduler_config()
lowerCamelCase__ : List[Any] = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCamelCase__ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
| 41 |
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
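    # The ellipse is y^2 + 4x^2 = 100, so the normal at (x, y) has gradient y / (4x).
    # sa and ca are sin(2*theta) and cos(2*theta) for the normal's angle theta; they
    # reflect the incoming ray's gradient about the normal.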
__lowerCamelCase : Tuple = point_y / 4 / point_x
__lowerCamelCase : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCamelCase : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowerCamelCase : Any = outgoing_gradient**2 + 4
__lowerCamelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCamelCase : str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
__lowerCamelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCamelCase : Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCamelCase : Optional[Any] = x_minus if isclose(lowerCamelCase__ , lowerCamelCase__ ) else x_plus
__lowerCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1.4 , lowerCamelCase__ = -9.6 ) -> int:
__lowerCamelCase : int = 0
__lowerCamelCase : float = first_x_coord
__lowerCamelCase : float = first_y_coord
__lowerCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = next_point(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def SCREAMING_SNAKE_CASE ( self : int , **UpperCAmelCase__ : Any) ->List[str]:
'''simple docstring'''
A__ = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase__)
return config
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **UpperCAmelCase__ : Tuple) ->Optional[Any]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**UpperCAmelCase__)
A__ = scheduler_class(**UpperCAmelCase__)
A__ , A__ = 10, 0.0
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__)
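        # standard DDIM loop: predict the noise residual, then step the
        # scheduler back to the previous (less noisy) sample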
for t in scheduler.timesteps:
A__ = model(UpperCAmelCase__ , UpperCAmelCase__)
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase__)
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1)
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase__)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
self.check_over_forward(time_step=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=UpperCAmelCase__ , eta=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.14771)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.32460)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1e-5
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ , A__ = 10, 0.0
scheduler.set_timesteps(UpperCAmelCase__)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = self.dummy_sample_deter + 0.1
A__ = self.dummy_sample_deter - 0.1
A__ = samplea.shape[0]
A__ = torch.stack([samplea, samplea, samplea] , dim=0)
A__ = torch.arange(UpperCAmelCase__)[0:3, None].repeat(1 , UpperCAmelCase__)
A__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
A__ = scheduler.batch_step_no_noise(UpperCAmelCase__ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , UpperCAmelCase__)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 1147.7904) < 1e-2
assert abs(result_mean.item() - 0.4982) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = self.full_loop()
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 172.0067) < 1e-2
assert abs(result_mean.item() - 0.223967) < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ = self.full_loop(prediction_type='''v_prediction''')
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 52.5302) < 1e-2
assert abs(result_mean.item() - 0.0684) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 149.8295) < 1e-2
assert abs(result_mean.item() - 0.1951) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 149.0784) < 1e-2
assert abs(result_mean.item() - 0.1941) < 1e-3
| 367 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase_ )
A__ = checkpoints.load_tax_checkpoint(lowercase_ )
A__ = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
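    # T5 v1.1 and LongT5 checkpoints use a gated MLP with two input
    # projections (wi_0, wi_1) instead of a single wi matrix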
if config.model_type == "t5":
A__ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
A__ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
A__ = flax_model.params['''encoder''']['''block'''][str(lowercase_ )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_global_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
A__ = tax_mlp_layer_norm
A__ = flax_model_encoder_layer_block
# Only for layer 0:
A__ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_global_rel_embedding
# Assigning
A__ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
A__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
A__ = tax_enc_dec_attention_module['''key''']['''kernel''']
A__ = tax_enc_dec_attention_module['''out''']['''kernel''']
A__ = tax_enc_dec_attention_module['''query''']['''kernel''']
A__ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
A__ = flax_model.params['''decoder''']['''block'''][str(lowercase_ )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_pre_attention_layer_norm
A__ = tax_enc_dec_attention_key
A__ = tax_enc_dec_attention_out
A__ = tax_enc_dec_attention_query
A__ = tax_enc_dec_attention_value
A__ = tax_cross_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
        A__ = tax_mlp_layer_norm
A__ = flax_model_decoder_layer_block
# Decoder Normalization
A__ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    A__ = tax_decoder_norm
# Only for layer 0:
A__ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_decoder_rel_embedding
# Token Embeddings
A__ = tax_model['''target''']['''token_embedder''']['''embedding''']
    A__ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
A__ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(lowercase_ )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 231 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                """There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
                f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
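# Minimal usage sketch (assumes hub access; the checkpoint below is the standard Pegasus one):
#   from transformers import PegasusTokenizerFast
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("PEGASUS uses gap-sentence generation.").input_ids
#   assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends EOS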
| 289 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
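# Minimal usage sketch (checkpoint name is illustrative; any LeViT checkpoint should work):
#   from transformers import LevitImageProcessor
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values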
| 324 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.02 , use_labels=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs ):
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1 )
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3 ) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args , ):
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self ):
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_bert( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = '''bert'''
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels )
    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_decoder_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )
    def test_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        model = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        self.assertIsNotNone(model )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
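# Minimal usage sketch (same checkpoint as the integration tests above; token ids are illustrative):
#   from transformers import BertGenerationEncoder
#   import torch
#   model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   hidden_states = model(torch.tensor([[101, 7592, 102]]))[0]  # shape (1, 3, 1024)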
| 358 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
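# Minimal usage sketch (checkpoint name is illustrative; any BLIP-2 checkpoint should work):
#   from transformers import Blip2Processor
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
#   # inputs carries pixel_values plus input_ids / attention_mask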
| 128 | 0 |
'''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase ):
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
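# e.g. perfect(6) is True since 1 + 2 + 3 == 6, and perfect(28) is True since 1 + 2 + 4 + 7 + 14 == 28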
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
__a: Any = int(input("""Enter number: """).strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 198 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
    def construct_mapping( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme( readme_content: str ) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata(dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'} # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls , path: Path ) -> "DatasetMetadata":
        with open(path , encoding="utf-8" ) as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path: Path ):
        if path.exists():
            with open(path , encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path , "w" , encoding="utf-8" ) as readme_file:
            readme_file.write(updated_readme_content )
    def _to_readme( self , readme_content: Optional[str] = None ) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string( cls , string: str ) -> "DatasetMetadata":
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="utf-8" , ).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
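    # Example: a README.md consumed by this block starts with a YAML front-matter section, e.g.
    #   ---
    #   train-eval-index:
    #   - config: default
    #   ---
    #   # Dataset Card ...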
| 18 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : Tuple = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
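# normalize_box scales pixel coordinates into the 0-1000 integer range LayoutLM-style models
# expect, e.g. box [10, 20, 30, 40] on a 100x200 image becomes [100, 100, 300, 200].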
def apply_tesseract( image: np.ndarray , lang: Optional[str] , tesseract_config: Optional[str] = None ):
    tesseract_config = tesseract_config if tesseract_config is not None else """"""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="""dict""" , config=tesseract_config )
    words, left, top, width, height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , apply_ocr: bool = True , ocr_lang: Optional[str] = None , tesseract_config: Optional[str] = "" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , apply_ocr: bool = None , ocr_lang: Optional[str] = None , tesseract_config: Optional[str] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , """pytesseract""" )
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
| 306 |
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
__lowercase : Any = get_failure_array(lowerCAmelCase_ )
# 2) Step through text searching for pattern
__lowercase , __lowercase : Optional[int] = 0, 0 # index into text, pattern
while i < len(lowerCAmelCase_ ):
if pattern[j] == text[i]:
if j == (len(lowerCAmelCase_ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
__lowercase : Optional[Any] = failure[j - 1]
continue
i += 1
return False
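# KMP runs in O(len(text) + len(pattern)): `i` only advances, and `j` only falls back
# through the precomputed failure (longest proper prefix-suffix) array.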
def snake_case_ ( lowerCAmelCase_ : str ):
__lowercase : List[Any] = [0]
__lowercase : Optional[Any] = 0
__lowercase : List[Any] = 1
while j < len(lowerCAmelCase_ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
__lowercase : List[str] = failure[i - 1]
continue
j += 1
failure.append(lowerCAmelCase_ )
return failure
if __name__ == "__main__":
# Test 1)
lowerCamelCase : Dict = '''abc1abc12'''
lowerCamelCase : Union[str, Any] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
lowerCamelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCamelCase : List[Any] = '''ABABX'''
lowerCamelCase : List[Any] = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
lowerCamelCase : int = '''AAAB'''
lowerCamelCase : Optional[int] = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
lowerCamelCase : Optional[Any] = '''abcdabcy'''
lowerCamelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
lowerCamelCase : Dict = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306 | 1 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__lowercase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
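# Requires a GitHub token with repo access in the environment; path and token value below
# are placeholders:
#   GITHUB_TOKEN=<personal-access-token> python utils/stale.py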
| 40 |
"""simple docstring"""
def decimal_to_binary( num )-> str:
    '''simple docstring'''
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
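# Examples (illustrative): decimal_to_binary(10) -> "0b1010", decimal_to_binary(-5) -> "-0b101"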
| 40 | 1 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = '''mbcs'''
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['''esc'''] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 99 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution ), 2 )
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
        self.assertTrue(hasattr(image_processor , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processor , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __magic_name__ ( self : Union[str, Any] ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
        self.assertTrue(hasattr(image_processor , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processor , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
def __magic_name__ ( self : List[Any] ):
pass
    def test_call_pil_four_channels( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
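# Minimal usage sketch (checkpoint name is illustrative; any Chinese-CLIP checkpoint should work):
#   from transformers import ChineseCLIPImageProcessor
#   processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values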
| 99 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
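# With the _LazyModule indirection above, submodules are only imported on first attribute
# access, so `import transformers` stays cheap even when torch-backed classes are listed.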
| 301 |
"""simple docstring"""
import math
def is_prime (number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution (ratio = 0.1 ):
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
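
# Quick illustrative run of the helpers above. `solution` follows the Project Euler 58 shape:
# it returns the spiral side length at which the share of primes on the diagonals first drops
# below `ratio` (0.5 here keeps the demo fast; the puzzle itself uses 0.1).
if __name__ == "__main__":
    assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    print(f"side length for ratio < 0.5: {solution(0.5)}")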
| 301 | 1 |
from __future__ import annotations
RADIX = 10


def radix_sort ( list_of_ints : list[int] ):
    """simple docstring"""
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets : list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
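
# Illustrative usage of radix_sort above; the sort works in place on non-negative integers
# and also returns the same list.
if __name__ == "__main__":
    sample = [170, 45, 75, 90, 802, 24, 2, 66]
    expected = sorted(sample)
    assert radix_sort(sample) == expected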
| 51 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ):
    """simple docstring"""
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class __magic_name__ ( DiffusionPipeline ):
    def __init__( self : Dict , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> Tuple:
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self : int , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}" )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
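
# Hedged usage sketch for the pipeline above (documentation only; the checkpoint name is an
# assumption based on the weights diffusers ships for its LDM super-resolution pipeline, and
# the class name there differs from the placeholder name used in this file):
#
#     from diffusers import LDMSuperResolutionPipeline
#     import PIL.Image
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#     upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")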
| 51 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self :Dict ):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 1_60_00,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + "\n" )
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
def lowerCamelCase ( self :List[str] , **__UpperCamelCase :Tuple ):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(__UpperCamelCase )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def lowerCamelCase ( self :List[Any] , **__UpperCamelCase :int ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , **__UpperCamelCase :List[str] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__UpperCamelCase )
def lowerCamelCase ( self :List[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self :Optional[Any] ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        processor.save_pretrained(self.tmpdirname )
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , WavaVecaCTCTokenizer )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , WavaVecaFeatureExtractor )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , BeamSearchDecoderCTC )
def lowerCamelCase ( self :Optional[Any] ):
        processor = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase ( self :Dict ):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"] )
        with self.assertRaisesRegex(ValueError , "include" ):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase ( self :Tuple ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        raw_speech = floats_list((3, 10_00) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="np" )
        input_processor = processor(raw_speech , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase ( self :Tuple ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        input_string = "This is a test string"
        encoded_processor = processor(text=input_string )
        encoded_tok = tokenizer(input_string )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def lowerCamelCase ( self :Optional[int] , shape :int=(2, 10, 16) , seed :Optional[Any]=77 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
def lowerCamelCase ( self :List[str] ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        decoded_processor = processor.decode(logits )
        decoded_decoder = decoder.decode_beams(logits )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual("</s> <s> </s>" , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
    @parameterized.expand([[None], ["fork"], ["spawn"]] )
    def lowerCamelCase ( self :Any , pool_context :Optional[int] ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits )
        else:
            with get_context(pool_context ).Pool() as pool:
                decoded_processor = processor.batch_decode(logits , pool )
        logits_list = list(logits )
        with get_context("fork" ).Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p , logits_list )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(texts_decoder , decoded_processor.text )
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
        self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
        self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def lowerCamelCase ( self :Dict ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = self._get_dummy_logits()
A = 15
A = -20.0
A = -4.0
A = processor.batch_decode(
__UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
A = decoded_processor_out.text
A = list(__UpperCamelCase )
with get_context("fork" ).Pool() as pool:
A = decoder.decode_beams_batch(
__UpperCamelCase , __UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
A = [d[0][0] for d in decoded_decoder_out]
A = [d[0][2] for d in decoded_decoder_out]
A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __UpperCamelCase )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __UpperCamelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __UpperCamelCase , atol=1e-3 ) )
def lowerCamelCase ( self :int ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = self._get_dummy_logits()
A = 2.0
A = 5.0
A = -20.0
A = True
A = processor.batch_decode(
__UpperCamelCase , alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
A = decoded_processor_out.text
A = list(__UpperCamelCase )
decoder.reset_params(
alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
with get_context("fork" ).Pool() as pool:
A = decoder.decode_beams_batch(
__UpperCamelCase , __UpperCamelCase , )
A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __UpperCamelCase )
A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = processor.decoder.model_container[processor.decoder._model_key]
A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
A = os.listdir(__UpperCamelCase )
A = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = snapshot_download("hf-internal-testing/processor_with_lm" )
A = WavaVecaProcessorWithLM.from_pretrained(__UpperCamelCase )
A = processor.decoder.model_container[processor.decoder._model_key]
A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
A = os.listdir(__UpperCamelCase )
A = os.listdir(__UpperCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Union[str, Any] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
A = floats_list((3, 10_00) )
A = processor_wavaveca(__UpperCamelCase , return_tensors="np" )
A = processor_auto(__UpperCamelCase , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
A = self._get_dummy_logits()
A = processor_wavaveca.batch_decode(__UpperCamelCase )
A = processor_auto.batch_decode(__UpperCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase ( self :Tuple ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def lowerCamelCase ( offsets :str , key :Optional[int] ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowerCamelCase ( self :List[Any] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = self._get_dummy_logits()[0]
A = processor.decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def lowerCamelCase ( self :List[Any] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = self._get_dummy_logits()
A = processor.batch_decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase ( self :List[str] ):
import torch
A = load_dataset("common_voice" , "en" , split="train" , streaming=__UpperCamelCase )
A = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_60_00 ) )
A = iter(__UpperCamelCase )
A = next(__UpperCamelCase )
A = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
A = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
A = model(__UpperCamelCase ).logits.cpu().numpy()
A = processor.decode(logits[0] , output_word_offsets=__UpperCamelCase )
A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
A = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) , __UpperCamelCase )
self.assertEqual(" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) , output.text )
# output times
A = torch.tensor(self.get_from_offsets(__UpperCamelCase , "start_time" ) )
A = torch.tensor(self.get_from_offsets(__UpperCamelCase , "end_time" ) )
# fmt: off
A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=0.01 ) )
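
# Hedged sketch of what the processor's decode step does under the hood: pyctcdecode's beam
# search turns a (time, vocab) log-probability matrix into text. The toy alphabet below is an
# illustration (with "" as the CTC blank, per pyctcdecode's convention), not the test fixture above.
if __name__ == "__main__":
    import numpy as np
    from pyctcdecode import build_ctcdecoder

    labels = ["", "a", "b", " "]  # index 0 is the CTC blank
    decoder = build_ctcdecoder(labels)
    fake_logits = np.log(np.clip(np.random.rand(20, len(labels)), 1e-9, 1.0))  # fake log-probs
    print(decoder.decode(fake_logits))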
| 292 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours ( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution ( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood ( solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search ( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main ( args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
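
# Illustrative input format (hedged; inferred from how generate_neighbours parses each line):
# every line of the data file is "<node_a> <node_b> <distance>", and the file's very first
# character doubles as the start node, e.g.:
#
#     a b 20
#     a c 18
#     b c 10
#
# Example invocation with such a file:
#
#     python tabu_search.py -f tabu_data.txt -i 4 -s 3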
| 311 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Any = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __lowercase ( PretrainedConfig ):
"""simple docstring"""
    model_type = "realm"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , retriever_proj_size=1_28 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=2_56 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=3_20 , num_block_records=13_35_37_18 , searcher_beam_size=50_00 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
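
# Illustrative construction sketch (hedged: this module only imports as part of the package
# because of the relative imports above, so treat this as documentation rather than a script):
#
#     config = __lowercase(num_candidates=4, reader_beam_size=3)
#     assert config.num_candidates == 4
#     assert config.model_type == "realm"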
| 66 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort ( a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition ( a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 1_00  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
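
# Small illustrative run of the two helpers above on a plain Python list; the returned value
# is the number of comparisons performed (expected to grow roughly as n log n on average).
sample = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
print(f"sorted: {sample}, comparisons: {comparisons}")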
| 66 | 1 |
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(subreddit, limit = 1, age = "new", wanted_data = None):
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = F'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(msg )
    response = requests.get(
        F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''', headers={"""User-agent""": """A random string"""}, )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
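
# Hedged usage sketch (this hits the live Reddit API, so keep it guarded; the subreddit and
# fields mirror the example above, and the returned mapping is keyed by post index):
if __name__ == "__main__":
    sample = get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"])
    for idx, post in sample.items():
        print(idx, post["title"], post["url"])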
| 287 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        input_ids = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 1_0, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__lowerCamelCase = logging.getLogger(__name__)
class NERTransformer( BaseTransformer ):
    mode = "token-classification"
    def __init__( self , hparams ) -> Any:
        '''simple docstring'''
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module("""tasks""" )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
def snake_case_ ( self , **UpperCamelCase__ ) -> int:
'''simple docstring'''
return self.model(**UpperCamelCase__ )
    def snake_case_ ( self , batch , batch_nb ) -> List[Any]:
        '''simple docstring'''
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["""token_type_ids"""] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def snake_case_ ( self ) -> Any:
        '''simple docstring'''
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("""Saving features into cached file %s""" , cached_features_file )
                torch.save(features , cached_features_file )
    def snake_case_ ( self , mode , batch_size , shuffle = False ) -> DataLoader:
        '''simple docstring'''
        cached_features_file = self._feature_file(mode )
        logger.info("""Loading features from cached file %s""" , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def snake_case_ ( self , batch , batch_nb ) -> List[Any]:
        '''simple docstring'''
        """Compute validation""" ""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["""token_type_ids"""] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def snake_case_ ( self , outputs ) -> int:
        '''simple docstring'''
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
        preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(out_label_list , preds_list ),
            """precision""": precision_score(out_label_list , preds_list ),
            """recall""": recall_score(out_label_list , preds_list ),
            """f1""": f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret["""log"""] = results
        return ret, preds_list, out_label_list
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
        ret , preds_list , out_label_list = self._eval_end(UpperCamelCase__ )
        logs = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
        ret , preds_list , out_label_list = self._eval_end(UpperCamelCase__ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
        logs = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def snake_case_ ( parser , root_dir ) -> List[Any]:
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            """--task_type""" , default="""NER""" , type=str , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
        parser.add_argument(
            """--max_seq_length""" , default=128 , type=int , help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--labels""" , default="""""" , type=str , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
        parser.add_argument(
            """--gpus""" , default=0 , type=int , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
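
# Hedged invocation sketch (the flags below follow add_generic_args/BaseTransformer in
# lightning_base.py plus the arguments added above; paths and sizes are assumptions):
#
#     python run_ner.py \
#         --data_dir ./data/conll2003 \
#         --labels ./data/conll2003/labels.txt \
#         --model_name_or_path bert-base-cased \
#         --output_dir ./ner-out \
#         --max_seq_length 128 \
#         --do_train --do_predict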
| 361 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ) -> Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
def snake_case_ ( self ) -> Any:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def snake_case_ ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> str:
        '''simple docstring'''
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def snake_case_ ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Optional[Any]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def snake_case_ ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> str:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def snake_case_ ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> int:
        '''simple docstring'''
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def snake_case_ ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Dict:
'''simple docstring'''
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def snake_case_ ( self ) -> str:
'''simple docstring'''
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
A_ = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
A_ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCamelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
A_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in get_values(UpperCamelCase__ ):
A_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
A_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
A_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
A_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
return inputs_dict
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCAmelCase__ ( ) -> Dict:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
        model = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
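
# Hedged helper sketch: LayoutLM-family models expect word boxes normalized to a 0-1000 grid
# (the integration test above feeds already-normalized toy boxes). This helper and its name
# are illustrative additions, not part of the test suite.
def _normalize_bbox(bbox, width, height):
    # bbox is (x0, y0, x1, y1) in pixels; returns the 0-1000 normalized box
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]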
| 101 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any:
a_ : Tuple = parent
a_ : int = batch_size
a_ : Tuple = seq_length
a_ : List[Any] = is_training
a_ : List[str] = use_token_type_ids
a_ : Dict = use_labels
a_ : Any = vocab_size
a_ : List[str] = hidden_size
a_ : Tuple = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : Dict = intermediate_size
a_ : Any = hidden_act
a_ : List[str] = hidden_dropout_prob
a_ : Tuple = attention_probs_dropout_prob
a_ : Optional[Any] = max_position_embeddings
a_ : List[Any] = type_vocab_size
a_ : int = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : List[str] = num_labels
a_ : Union[str, Any] = num_choices
a_ : str = scope
a_ : Tuple = self.vocab_size - 1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Any = None
if self.use_token_type_ids:
a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : List[Any] = None
a_ : Union[str, Any] = None
a_ : List[Any] = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
a_ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
a_ : Any = self.num_labels
a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : Optional[Any] = self.prepare_config_and_inputs()
        ( a_ , a_ , a_ , a_ , a_ , a_ , a_ ) = config_and_inputs
a_ : Optional[int] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : Tuple = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ : List[str] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ : Dict = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]:
a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
a_ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
a_ : str = inputs_dict['labels']
a_ : Optional[int] = inputs_dict['labels']
a_ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
a_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
a_ : str = OpenAIGPTModelTester(self )
a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is
a_ : Tuple = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
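        # the exact token ids can only be asserted because generation here is
        # greedy (no sampling), which makes the output fully deterministic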
a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
| 32 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__SCREAMING_SNAKE_CASE : Dict = get_logger(__name__)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[str] , A : Optional[str] = None ):
_UpperCAmelCase : Dict = (
os.path.join(A , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_UpperCAmelCase : Union[str, Any] = Extractor
def _A ( self : Tuple , A : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_UpperCAmelCase : Dict = os.path.abspath(A )
return os.path.join(self.extract_dir , hash_url_to_filename(A ) )
def _A ( self : int , A : str , A : bool ):
return force_extract or (
not os.path.isfile(A ) and not (os.path.isdir(A ) and os.listdir(A ))
)
def _A ( self : Optional[int] , A : str , A : bool = False ):
_UpperCAmelCase : Union[str, Any] = self.extractor.infer_extractor_format(A )
if not extractor_format:
return input_path
_UpperCAmelCase : Optional[Any] = self._get_output_path(A )
if self._do_extract(A , A ):
self.extractor.extract(A , A , A )
return output_path
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
@classmethod
@abstractmethod
def _A ( cls : str , A : Union[Path, str] , **A : Dict ):
...
@staticmethod
@abstractmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
...
class lowerCamelCase_ (snake_case__ , snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[bytes] = []
@staticmethod
def _A ( A : Union[Path, str] , A : int ):
with open(A , "rb" ) as f:
return f.read(A )
@classmethod
def _A ( cls : Any , A : Union[Path, str] , A : bytes = b"" ):
if not magic_number:
            _UpperCAmelCase : Any = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
_UpperCAmelCase : int = cls.read_magic_number(A , A )
except OSError:
return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
@classmethod
def _A ( cls : str , A : Union[Path, str] , **A : List[Any] ):
return tarfile.is_tarfile(A )
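    # Guard against "tar slip" path traversal: members whose resolved target,
    # symlink or hard link would escape the extraction directory are skipped.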
@staticmethod
def _A ( A : Union[str, Any] , A : str ):
def resolved(A : str ) -> str:
return os.path.realpath(os.path.abspath(A ) )
def badpath(A : str , A : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(A , A ) ).startswith(A )
def badlink(A : str , A : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCAmelCase : List[str] = resolved(os.path.join(A , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=A )
_UpperCAmelCase : Optional[int] = resolved(A )
for finfo in members:
if badpath(finfo.name , A ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(A , A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(A , A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : int = tarfile.open(A )
tar_file.extractall(A , members=TarExtractor.safemembers(A , A ) )
tar_file.close()
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
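    # gzip streams begin with the two magic bytes 0x1F 0x8B (RFC 1952)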
__UpperCamelCase: Union[str, Any] = [b"\x1F\x8B"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with gzip.open(A , "rb" ) as gzip_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
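    # b"PK\x03\x04" is the ZIP local-file-header signature ("PK" being the
    # initials of the format's author, Phil Katz)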
__UpperCamelCase: Dict = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def _A ( cls : Dict , A : Union[Path, str] , A : bytes = b"" ):
if super().is_extractable(A , magic_number=A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(A , "rb" ) as fp:
_UpperCAmelCase : Tuple = _EndRecData(A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCAmelCase : Dict = fp.read(A ) # CD is where we expect it to be
if len(A ) == sizeCentralDir:
_UpperCAmelCase : Any = struct.unpack(A , A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
os.makedirs(A , exist_ok=A )
with zipfile.ZipFile(A , "r" ) as zip_file:
zip_file.extractall(A )
zip_file.close()
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
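    # XZ containers open with the 6-byte magic \xFD "7zXZ" \x00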
__UpperCamelCase: Dict = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with lzma.open(A ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : List[str] = rarfile.RarFile(A )
rf.extractall(A )
rf.close()
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
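    # little-endian encoding of the zstd frame magic number 0xFD2FB528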
__UpperCamelCase: Optional[Any] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_UpperCAmelCase : Optional[Any] = zstd.ZstdDecompressor()
with open(A , "rb" ) as ifh, open(A , "wb" ) as ofh:
dctx.copy_stream(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
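    # ASCII "BZh", the bzip2 stream header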
__UpperCamelCase: Optional[Any] = [b"\x42\x5A\x68"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with bza.open(A , "rb" ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
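    # ASCII "7z" followed by the fixed signature bytes \xBC\xAF\x27\x1C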
__UpperCamelCase: List[Any] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(A , exist_ok=A )
with pyazr.SevenZipFile(A , "r" ) as archive:
archive.extractall(A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
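    # little-endian encoding of the LZ4 frame magic number 0x184D2204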
__UpperCamelCase: Optional[int] = [b"\x04\x22\x4D\x18"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(A , "rb" ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _A ( cls : List[Any] ):
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _A ( A : Union[Path, str] , A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(A , magic_number_length=A )
except OSError:
return b""
@classmethod
def _A ( cls : Optional[Any] , A : Union[Path, str] , A : bool = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=A , )
_UpperCAmelCase : Union[str, Any] = cls.infer_extractor_format(A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _A ( cls : Dict , A : Union[Path, str] ): # <Added version="2.4.0"/>
_UpperCAmelCase : Optional[int] = cls._get_magic_number_max_length()
_UpperCAmelCase : str = cls._read_magic_number(A , A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(A , magic_number=A ):
return extractor_format
@classmethod
def _A ( cls : List[str] , A : Union[Path, str] , A : Union[Path, str] , A : Optional[str] = None , A : Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(A ) , exist_ok=A )
# Prevent parallel extractions
_UpperCAmelCase : Tuple = str(Path(A ).with_suffix(".lock" ) )
with FileLock(A ):
shutil.rmtree(A , ignore_errors=A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(A , A ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=A , )
_UpperCAmelCase : Tuple = extractor if extractor != "deprecated" else extractor_format
else:
_UpperCAmelCase : Tuple = cls.extractors[extractor_format]
return extractor.extract(A , A )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(A ):
return extractor.extract(A , A )
| 31 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
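# Convert PIL images (or a list of them) to stacked NCHW float tensors,
# rescaling pixel values from [0, 255] to the [-1, 1] range the VAE expects.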
def a__ ( A__, A__, A__ ):
if isinstance(_lowercase, torch.Tensor ):
return image
elif isinstance(_lowercase, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ : int = [image]
if isinstance(image[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ : Dict = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ : Tuple = np.concatenate(_lowercase, axis=0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(_lowercase ).astype(np.floataa ) / 2_55.0
SCREAMING_SNAKE_CASE_ : str = image.transpose(0, 3, 1, 2 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(_lowercase )
elif isinstance(image[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ : str = torch.cat(_lowercase, dim=0 )
return image
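# Spherical linear interpolation (slerp):
#   v(t) = sin((1 - t) * theta) / sin(theta) * v0 + sin(t * theta) / sin(theta) * v1
# where theta is the angle between v0 and v1.  For nearly parallel vectors
# (|dot| above the threshold) sin(theta) ~ 0 makes the formula unstable, so
# the code falls back to plain linear interpolation.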
def a__ ( A__, A__, A__, A__=0.99_95 ):
if not isinstance(_lowercase, np.ndarray ):
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Any = va.device
SCREAMING_SNAKE_CASE_ : int = va.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Dict = va.cpu().numpy()
SCREAMING_SNAKE_CASE_ : int = np.sum(va * va / (np.linalg.norm(_lowercase ) * np.linalg.norm(_lowercase )) )
if np.abs(_lowercase ) > DOT_THRESHOLD:
SCREAMING_SNAKE_CASE_ : Dict = (1 - t) * va + t * va
else:
SCREAMING_SNAKE_CASE_ : str = np.arccos(_lowercase )
SCREAMING_SNAKE_CASE_ : Dict = np.sin(_lowercase )
SCREAMING_SNAKE_CASE_ : Optional[int] = theta_a * t
SCREAMING_SNAKE_CASE_ : Dict = np.sin(_lowercase )
SCREAMING_SNAKE_CASE_ : List[str] = np.sin(theta_a - theta_t ) / sin_theta_a
SCREAMING_SNAKE_CASE_ : List[Any] = sin_theta_t / sin_theta_a
SCREAMING_SNAKE_CASE_ : str = sa * va + sa * va
if inputs_are_torch:
SCREAMING_SNAKE_CASE_ : Any = torch.from_numpy(_lowercase ).to(_lowercase )
return va
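# Squared great-circle distance between L2-normalized embeddings; used as the
# CLIP-guidance loss between the decoded image and the target CLIP embedding.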
def a__ ( A__, A__ ):
SCREAMING_SNAKE_CASE_ : str = F.normalize(_lowercase, dim=-1 )
SCREAMING_SNAKE_CASE_ : int = F.normalize(_lowercase, dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def a__ ( A__, A__ ):
for param in model.parameters():
SCREAMING_SNAKE_CASE_ : int = value
class __lowercase (a__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , clip_model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , coca_model=lowerCAmelCase__ , coca_tokenizer=lowerCAmelCase__ , coca_transform=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : str = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCAmelCase__ )
set_requires_grad(self.clip_model , lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.vae , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.vae , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.unet , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.unet , lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = min(int(num_inference_steps * strength ) , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE_ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase__ )}''' )
SCREAMING_SNAKE_CASE_ : int = image.to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase__ )
]
SCREAMING_SNAKE_CASE_ : Tuple = torch.cat(lowerCAmelCase__ , dim=0 )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.vae.encode(lowerCAmelCase__ ).latent_dist.sample(lowerCAmelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.18_215 * init_latents
SCREAMING_SNAKE_CASE_ : int = init_latents.repeat_interleave(lowerCAmelCase__ , dim=0 )
SCREAMING_SNAKE_CASE_ : Tuple = randn_tensor(init_latents.shape , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
# get latents
SCREAMING_SNAKE_CASE_ : Any = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = init_latents
return latents
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.coca_transform(lowerCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
SCREAMING_SNAKE_CASE_ : Any = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor.preprocess(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
SCREAMING_SNAKE_CASE_ : int = self.clip_model.get_image_features(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = image_embeddings_clip.repeat_interleave(lowerCAmelCase__ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = latents.detach().requires_grad_()
SCREAMING_SNAKE_CASE_ : int = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
SCREAMING_SNAKE_CASE_ : List[str] = self.scheduler.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE_ : Tuple = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
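            # fac = sqrt(1 - alpha_bar_t): at noisier timesteps the blended sample
            # leans more on the denoised x_0 estimate than on the raw latents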
SCREAMING_SNAKE_CASE_ : int = torch.sqrt(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : int = self.scheduler.sigmas[index]
SCREAMING_SNAKE_CASE_ : Any = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 / 0.18_215 * sample
SCREAMING_SNAKE_CASE_ : Optional[int] = self.vae.decode(lowerCAmelCase__ ).sample
SCREAMING_SNAKE_CASE_ : Any = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ : int = transforms.Resize(self.feature_extractor_size )(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.normalize(lowerCAmelCase__ ).to(latents.dtype )
SCREAMING_SNAKE_CASE_ : List[str] = self.clip_model.get_image_features(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = spherical_dist_loss(lowerCAmelCase__ , lowerCAmelCase__ ).mean() * clip_guidance_scale
SCREAMING_SNAKE_CASE_ : int = -torch.autograd.grad(lowerCAmelCase__ , lowerCAmelCase__ )[0]
if isinstance(self.scheduler , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : str = latents.detach() + grads * (sigma**2)
SCREAMING_SNAKE_CASE_ : List[Any] = noise_pred_original
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = noise_pred_original - torch.sqrt(lowerCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 5_1_2 , lowerCAmelCase__ = 5_1_2 , lowerCAmelCase__ = 0.6 , lowerCAmelCase__ = 5_0 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1_0_0 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = 0.8 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowerCAmelCase__ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowerCAmelCase__ , torch.Generator ) and batch_size > 1:
SCREAMING_SNAKE_CASE_ : Optional[int] = [generator] + [None] * (batch_size - 1)
SCREAMING_SNAKE_CASE_ : Optional[int] = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
SCREAMING_SNAKE_CASE_ : List[Any] = [x[0] for x in coca_is_none if x[1]]
SCREAMING_SNAKE_CASE_ : str = ", ".join(lowerCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCAmelCase__ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
SCREAMING_SNAKE_CASE_ : int = self.get_image_description(lowerCAmelCase__ )
if style_prompt is None:
if len(lowerCAmelCase__ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_description(lowerCAmelCase__ )
# get prompt text embeddings for content and style
SCREAMING_SNAKE_CASE_ : str = self.tokenizer(
lowerCAmelCase__ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(
lowerCAmelCase__ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE_ : Dict = slerp(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE_ : str = text_embeddings.repeat_interleave(lowerCAmelCase__ , dim=0 )
# set timesteps
SCREAMING_SNAKE_CASE_ : Optional[int] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
if accepts_offset:
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
self.scheduler.set_timesteps(lowerCAmelCase__ , **lowerCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE_ : Any = self.get_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , self.device )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = timesteps[:1].repeat(lowerCAmelCase__ )
# Preprocess image
SCREAMING_SNAKE_CASE_ : List[Any] = preprocess(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_latents(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text_embeddings.dtype , self.device , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = preprocess(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_latents(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text_embeddings.dtype , self.device , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = slerp(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE_ : List[str] = self.get_clip_image_embeddings(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_clip_image_embeddings(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = slerp(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE_ : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : Dict = content_text_input.input_ids.shape[-1]
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer([''] , padding='max_length' , max_length=lowerCAmelCase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
SCREAMING_SNAKE_CASE_ : Optional[int] = uncond_embeddings.repeat_interleave(lowerCAmelCase__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE_ : Optional[Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE_ : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
SCREAMING_SNAKE_CASE_ : Tuple = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device='cpu' , dtype=lowerCAmelCase__ ).to(
self.device )
else:
SCREAMING_SNAKE_CASE_ : List[str] = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE_ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE_ : Dict = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE_ : Any = {}
if accepts_eta:
SCREAMING_SNAKE_CASE_ : Optional[int] = eta
# check if the scheduler accepts generator
SCREAMING_SNAKE_CASE_ : Optional[int] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = generator
with self.progress_bar(total=lowerCAmelCase__ ):
for i, t in enumerate(lowerCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ : List[str] = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
SCREAMING_SNAKE_CASE_ : Any = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : Any = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE_ : List[str] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.cond_fn(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 / 0.18_215 * latents
SCREAMING_SNAKE_CASE_ : Tuple = self.vae.decode(lowerCAmelCase__ ).sample
SCREAMING_SNAKE_CASE_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ : int = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ )
| 363 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def a__ ( ):
raise RuntimeError('CUDA out of memory.' )
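# `find_executable_batch_size` re-runs the decorated function, halving
# `batch_size` each time a CUDA out-of-memory error is raised, until the call
# succeeds or the batch size reaches zero, hence the [128, 64, 32, 16, 8]
# sequences asserted below.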
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : int = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE_ : Tuple = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE_ : str = nn.Linear(4 , 5 )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase__ ) ) )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCAmelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCAmelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCAmelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCAmelCase__ , lowerCAmelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCAmelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = mock_training_loop_function('hello' )
self.assertListEqual(lowerCAmelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCAmelCase__ ):
pass
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCAmelCase__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function(1_2_8 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCAmelCase__ ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE_ : Optional[int] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = release_memory(lowerCAmelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCAmelCase__ )
| 162 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''funnel'''
SCREAMING_SNAKE_CASE_ : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self ,SCREAMING_SNAKE_CASE__=3_05_22 ,SCREAMING_SNAKE_CASE__=[4, 4, 4] ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=64 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu_new" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=1E-9 ,SCREAMING_SNAKE_CASE__="mean" ,SCREAMING_SNAKE_CASE__="relative_shift" ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,**SCREAMING_SNAKE_CASE__ ,) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = vocab_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = block_sizes
__SCREAMING_SNAKE_CASE :Optional[Any] = [1] * len(SCREAMING_SNAKE_CASE__ ) if block_repeats is None else block_repeats
assert len(SCREAMING_SNAKE_CASE__ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__SCREAMING_SNAKE_CASE :List[str] = num_decoder_layers
__SCREAMING_SNAKE_CASE :Optional[int] = d_model
__SCREAMING_SNAKE_CASE :Any = n_head
__SCREAMING_SNAKE_CASE :List[Any] = d_head
__SCREAMING_SNAKE_CASE :List[str] = d_inner
__SCREAMING_SNAKE_CASE :List[Any] = hidden_act
__SCREAMING_SNAKE_CASE :List[Any] = hidden_dropout
__SCREAMING_SNAKE_CASE :List[str] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :List[Any] = initializer_range
__SCREAMING_SNAKE_CASE :Any = initializer_std
__SCREAMING_SNAKE_CASE :List[Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__SCREAMING_SNAKE_CASE :List[str] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__SCREAMING_SNAKE_CASE :Optional[int] = attention_type
__SCREAMING_SNAKE_CASE :Optional[int] = separate_cls
__SCREAMING_SNAKE_CASE :List[Any] = truncate_seq
__SCREAMING_SNAKE_CASE :Union[str, Any] = pool_q_only
super().__init__(**SCREAMING_SNAKE_CASE__ )
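    # The encoder depth follows from the block layout, so `num_hidden_layers`
    # is derived as sum(block_sizes) and rejects direct assignment below.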
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
        raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 191 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : List[Any] = '''llama'''
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''past_key_values''']
def __init__( self ,SCREAMING_SNAKE_CASE__=3_20_00 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=1_10_08 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__="silu" ,SCREAMING_SNAKE_CASE__=20_48 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-6 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ,) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE :int = max_position_embeddings
__SCREAMING_SNAKE_CASE :List[str] = hidden_size
__SCREAMING_SNAKE_CASE :Tuple = intermediate_size
__SCREAMING_SNAKE_CASE :List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[Any] = num_attention_heads
# for backward compatibility
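        # num_key_value_heads < num_attention_heads enables grouped-query
        # attention (GQA); equal values recover standard multi-head attention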
if num_key_value_heads is None:
__SCREAMING_SNAKE_CASE :Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE :str = num_key_value_heads
__SCREAMING_SNAKE_CASE :Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE :List[str] = initializer_range
__SCREAMING_SNAKE_CASE :Union[str, Any] = rms_norm_eps
__SCREAMING_SNAKE_CASE :Dict = pretraining_tp
__SCREAMING_SNAKE_CASE :Optional[Any] = use_cache
__SCREAMING_SNAKE_CASE :Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,tie_word_embeddings=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f'''got {self.rope_scaling}''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = self.rope_scaling.get('''type''' ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = self.rope_scaling.get('''factor''' ,SCREAMING_SNAKE_CASE__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 191 | 1 |
from collections import defaultdict
def lowerCamelCase_ ( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = first_str.lower().strip()
UpperCamelCase__ = second_str.lower().strip()
# Remove whitespace
UpperCamelCase__ = first_str.replace(''' ''', '''''' )
UpperCamelCase__ = second_str.replace(''' ''', '''''' )
# Strings of different lengths are not anagrams
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
return False
# Default values for count should be 0
UpperCamelCase__ = defaultdict(lowerCAmelCase__ )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second; the strings are anagrams
    # exactly when every count returns to zero.
for i in range(len(lowerCAmelCase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase = input("""Enter the first string """).strip()
lowercase = input("""Enter the second string """).strip()
lowercase = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 351 |
lowercase = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
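# Every factor above is the unit's value in joules, so a conversion pivots
# through joules: result = value * factor[from_type] / factor[to_type].
#   e.g. 2 kilowatthours in joules: 2 * 3_600_000.0 / 1.0 = 7.2e6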
def lowerCamelCase_ ( UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : float ):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCamelCase__ = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {", ".join(UpperCamelCase__ )}"""
)
raise ValueError(UpperCamelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
import random
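# Toy "one-time pad" variant: each plaintext code point p is paired with a
# fresh random key k and stored as c = (p + k) * k; decryption inverts this
# exactly via p = (c - k * k) / k, so the (cipher, key) pair round-trips.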
class UpperCAmelCase_ :
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = [ord(lowerCamelCase__) for i in text]
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = []
for i in plain:
_lowerCAmelCase : Tuple = random.randint(1, 300)
_lowerCAmelCase : Tuple = (i + k) * k
cipher.append(lowerCamelCase__)
key.append(lowerCamelCase__)
return cipher, key
@staticmethod
def snake_case__ ( __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for i in range(len(lowerCamelCase__)):
_lowerCAmelCase : int = int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(lowerCamelCase__))
return "".join(lowerCamelCase__)
if __name__ == "__main__":
_snake_case = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 36 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
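# TvltProcessor is a thin wrapper: `images` are routed to TvltImageProcessor,
# raw `audio` arrays to TvltFeatureExtractor, and the two output dicts are
# merged, which is what the round-trip tests below rely on.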
@require_torch
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = '''ZinengTang/tvlt-base'''
UpperCamelCase__ : int = tempfile.mkdtemp()
def UpperCAmelCase__ ( self : int , **lowerCamelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCamelCase__ : List[str] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase__ )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : List[Any] = self.get_feature_extractor()
UpperCamelCase__ : Dict = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : Any = np.ones([12000] )
UpperCamelCase__ : Union[str, Any] = feature_extractor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : Any = processor(audio=lowerCamelCase__ , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_image_processor()
UpperCamelCase__ : Any = self.get_feature_extractor()
UpperCamelCase__ : int = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : int = np.ones([3, 224, 224] )
UpperCamelCase__ : List[str] = image_processor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : str = processor(images=lowerCamelCase__ , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_feature_extractor()
UpperCamelCase__ : Union[str, Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : List[str] = np.ones([12000] )
UpperCamelCase__ : Tuple = np.ones([3, 224, 224] )
UpperCamelCase__ : Optional[Any] = processor(audio=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : str = self.get_feature_extractor()
UpperCamelCase__ : Tuple = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 146 | 0 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value, weight, capacity) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value = 0.0
    fractions = [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
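
# Worked example with made-up numbers (not part of the original module):
# items are taken in decreasing value/weight ratio (6, 5, 4), so the first
# two fit whole and two thirds of the third fills the remaining capacity.
#     fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     -> (240.0, [1, 1, 0.6666666666666666])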
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = False, False, False
@dataclass
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = "dict"
__UpperCamelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__UpperCamelCase = field(default="Audio" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCamelCase = BytesIO()
sf.write(_a , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCamelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
lowerCamelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
lowerCamelCase = BytesIO(bytes() )
sf.write(_a , _a , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCamelCase , lowerCamelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCamelCase = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCamelCase = token_per_repo_id or {}
lowerCamelCase = path.split("""::""" )[-1]
try:
lowerCamelCase = string_to_dict(_a , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCamelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCamelCase = None
with xopen(_a , """rb""" , use_auth_token=_a ) as f:
lowerCamelCase , lowerCamelCase = sf.read(_a )
else:
lowerCamelCase , lowerCamelCase = sf.read(_a )
lowerCamelCase = array.T
if self.mono:
lowerCamelCase = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCamelCase = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
lowerCamelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowerCAmelCase ( self ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCamelCase = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCamelCase = storage.field("""bytes""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCamelCase = storage.field("""path""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a , """rb""" ) as f:
lowerCamelCase = f.read()
return bytes_
lowerCamelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
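
# A minimal usage sketch (the file path is hypothetical; `datasets` decodes
# the audio column lazily on access):
# from datasets import Dataset, Audio
# ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]}).cast_column(
#     "audio", Audio(sampling_rate=16_000)
# )
# sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}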
| 168 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch) -> float:
    """First method: ordinary least squares on [1, date, match] features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user, train_match, test_match) -> float:
    """Second method: SARIMAX with the match count as exogenous variable."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train, x_test, train_user) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user) -> float:
    """Lower safety limit: one tenth of the IQR below the first quartile."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote, actual_result) -> bool:
    """Majority vote: are most forecasts within 0.1 of the actual value?"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
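
# Worked example with made-up numbers (not part of the original module):
#     data_safety_checker([0.95, 0.92, 0.7], 1.0) -> True
# the first two forecasts fall within the 0.1 tolerance (safe == 2) while
# the third does not (not_safe == 1), so the day is judged safe.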
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 80 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
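
# Example invocation (the script filename and output path are hypothetical;
# the model name must be a valid timm checkpoint id):
#     python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#         --pytorch_dump_folder_path ./bit_dump --push_to_hub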
| 101 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 153 | 0 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode `data` using the Base64 alphabet above."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to raw bytes."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
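
# Round-trip sanity check against the standard library (illustrative only):
#     import base64
#     payload = b"some sample bytes"
#     assert base64_encode(payload) == base64.b64encode(payload)
#     assert base64_decode(base64_encode(payload)) == payload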
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
"""simple docstring"""
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 145 | 0 |
"""simple docstring"""
ROMAN = [
(1_0_0_0, """M"""),
(9_0_0, """CM"""),
(5_0_0, """D"""),
(4_0_0, """CD"""),
(1_0_0, """C"""),
(9_0, """XC"""),
(5_0, """L"""),
(4_0, """XL"""),
(1_0, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """Convert an integer to its Roman numeral representation."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 352 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCamelCase_ : Any = random.Random()
def _A ( lowercase , lowercase=1.0 , lowercase=None , lowercase=None ):
"""simple docstring"""
if rng is None:
a =global_rng
a =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __A , __A=7 , __A=400 , __A=2000 , __A=10 , __A=160 , __A=8 , __A=0.0 , __A=4000 , __A=False , __A=True , ) -> Optional[Any]:
a =parent
a =batch_size
a =min_seq_length
a =max_seq_length
a =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a =padding_value
a =sampling_rate
a =return_attention_mask
a =do_normalize
a =feature_size
a =chunk_length
a =hop_length
def SCREAMING_SNAKE_CASE ( self ) -> str:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE ( self , __A=False , __A=False ) -> str:
def _flatten(__A ):
return list(itertools.chain(*__A ) )
if equal_length:
a =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a =[np.asarray(__A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = WhisperFeatureExtractor if is_speech_available() else None
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =WhisperFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a =feat_extract_first.save_pretrained(__A )[0]
check_json_file_has_correct_format(__A )
a =self.feature_extraction_class.from_pretrained(__A )
a =feat_extract_first.to_dict()
a =feat_extract_second.to_dict()
a =feat_extract_first.mel_filters
a =feat_extract_second.mel_filters
self.assertTrue(np.allclose(__A , __A ) )
self.assertEqual(__A , __A )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a =os.path.join(__A , '''feat_extract.json''' )
feat_extract_first.to_json_file(__A )
a =self.feature_extraction_class.from_json_file(__A )
a =feat_extract_first.to_dict()
a =feat_extract_second.to_dict()
a =feat_extract_first.mel_filters
a =feat_extract_second.mel_filters
self.assertTrue(np.allclose(__A , __A ) )
self.assertEqual(__A , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
a =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a =[np.asarray(__A ) for speech_input in speech_inputs]
# Test feature size
a =feature_extractor(__A , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
a =feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
a =feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
# Test batched
a =feature_extractor(__A , return_tensors='''np''' ).input_features
a =feature_extractor(__A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(__A , __A ):
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a =[floats_list((1, x) )[0] for x in (800, 800, 800)]
a =np.asarray(__A )
a =feature_extractor(__A , return_tensors='''np''' ).input_features
a =feature_extractor(__A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(__A , __A ):
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
# Test truncation required
a =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
a =[np.asarray(__A ) for speech_input in speech_inputs]
a =[x[: feature_extractor.n_samples] for x in speech_inputs]
a =[np.asarray(__A ) for speech_input in speech_inputs_truncated]
a =feature_extractor(__A , return_tensors='''np''' ).input_features
a =feature_extractor(__A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(__A , __A ):
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
import torch
a =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a =np.random.rand(100 , 32 ).astype(np.floataa )
a =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a =feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
a =feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Dict:
a =load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
a =ds.sort('''id''' ).select(range(__A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE ( self ) -> Any:
# fmt: off
a =torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
a =self._load_datasamples(1 )
a =WhisperFeatureExtractor()
a =feature_extractor(__A , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __A , atol=1E-4 ) )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 215 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/speecht5_asr": 1_0_2_4,
"microsoft/speecht5_tts": 1_0_2_4,
"microsoft/speecht5_vc": 1_0_2_4,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
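
# A minimal usage sketch (requires network access to one of the Hub
# checkpoints listed in PRETRAINED_VOCAB_FILES_MAP above):
# tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
# ids = tokenizer("hello world").input_ids  # character-level pieces plus </s>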
| 45 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 204 | 0 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=None ):
a__ = None
if token is not None:
a__ = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json()
a__ = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
a__ = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
for i in range(__lowerCAmelCase ):
a__ = requests.get(url + F'&page={i + 2}' , headers=__lowerCAmelCase ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int]=None ):
a__ = None
if token is not None:
a__ = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
a__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json()
a__ = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
a__ = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
for i in range(__lowerCAmelCase ):
a__ = requests.get(url + F'&page={i + 2}' , headers=__lowerCAmelCase ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
a__ = None
if token is not None:
a__ = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase , allow_redirects=__lowerCAmelCase )
a__ = result.headers['Location']
a__ = requests.get(__lowerCAmelCase , allow_redirects=__lowerCAmelCase )
a__ = os.path.join(__lowerCAmelCase , F'{artifact_name}.zip' )
with open(__lowerCAmelCase , 'wb' ) as fp:
fp.write(response.content )
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any]=None ):
a__ = []
a__ = []
a__ = None
with zipfile.ZipFile(__lowerCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowerCAmelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowerCAmelCase ) as f:
for line in f:
a__ = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__ = line[: line.index(': ' )]
a__ = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
a__ = line[len('FAILED ' ) :]
failed_tests.append(__lowerCAmelCase )
elif filename == "job_name.txt":
a__ = line
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
F'`errors` and `failed_tests` should have the same number of elements. Got {len(__lowerCAmelCase )} for `errors` '
F'and {len(__lowerCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
a__ = None
if job_name and job_links:
a__ = job_links.get(__lowerCAmelCase , __lowerCAmelCase )
# A list with elements of the form (line of error, error, failed test)
a__ = [x + [y] + [job_link] for x, y in zip(__lowerCAmelCase , __lowerCAmelCase )]
return result
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int]=None ):
a__ = []
a__ = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for p in os.listdir(__lowerCAmelCase ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowerCAmelCase , job_links=__lowerCAmelCase ) )
return errors
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str]=None ):
a__ = Counter()
counter.update([x[1] for x in logs] )
a__ = counter.most_common()
a__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__ = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
a__ = dict(sorted(r.items() , key=lambda __lowerCAmelCase : item[1]["count"] , reverse=__lowerCAmelCase ) )
return r
def __lowercase ( __lowerCAmelCase : int ):
a__ = test.split('::' )[0]
if test.startswith('tests/models/' ):
a__ = test.split('/' )[2]
else:
a__ = None
return test
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=None ):
a__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__ = [x for x in logs if x[2] is not None]
a__ = {x[2] for x in logs}
a__ = {}
for test in tests:
a__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__ = counter.most_common()
a__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__ = sum(error_counts.values() )
if n_errors > 0:
a__ = {'count': n_errors, 'errors': error_counts}
a__ = dict(sorted(r.items() , key=lambda __lowerCAmelCase : item[1]["count"] , reverse=__lowerCAmelCase ) )
return r
def __lowercase ( __lowerCAmelCase : str ):
a__ = '| no. | error | status |'
a__ = '|-:|:-|:-|'
a__ = [header, sep]
for error in reduced_by_error:
a__ = reduced_by_error[error]['count']
a__ = F'| {count} | {error[:1_0_0]} | |'
lines.append(__lowerCAmelCase )
return "\n".join(__lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[Any] ):
a__ = '| model | no. of errors | major error | count |'
a__ = '|-:|-:|-:|-:|'
a__ = [header, sep]
for model in reduced_by_model:
a__ = reduced_by_model[model]['count']
a__ , a__ = list(reduced_by_model[model]['errors'].items() )[0]
a__ = F'| {model} | {count} | {error[:6_0]} | {_count} |'
lines.append(__lowerCAmelCase )
return "\n".join(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
snake_case : Optional[int] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
snake_case : Union[str, Any] = get_job_links(args.workflow_run_id, token=args.token)
snake_case : Optional[int] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
snake_case : int = k.find(''' / ''')
snake_case : Tuple = k[index + len(''' / ''') :]
snake_case : Dict = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
snake_case : List[Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
snake_case : List[Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
snake_case : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
snake_case : Optional[Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
snake_case : List[Any] = reduce_by_error(errors)
snake_case : str = reduce_by_model(errors)
snake_case : List[Any] = make_github_table(reduced_by_error)
snake_case : Tuple = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
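
# Example invocation (the script filename, run id, and token value are
# hypothetical; the token needs actions:read permission as noted above):
#     python parse_ci_errors.py --workflow_run_id 123456789 \
#         --output_dir ./ci_errors --token $GH_TOKEN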
| 367 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Tuple = torch.device('''cpu''')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
snake_case : Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 109 | 0 |
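For reference, a hypothetical invocation of the conversion script above (the file name is an assumption; the flags come directly from the argparse block):

# python convert_swiftformer_original_to_hf.py \
#     --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ \
#     --original_ckpt ./swiftformer_xs.pth  # hypothetical local checkpoint path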
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase : Tuple = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 136 |
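For context, a minimal sketch of the `# Copied from` convention these tests exercise (the file contents are hypothetical; `is_copy_consistent` is called exactly as in `check_copy_consistency` above):

# new_code.py (hypothetical):
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead
#   class MyLMPredictionHead(nn.Module):
#       ...  # body must match the referenced source, modulo declared renames
#
# failures = check_copies.is_copy_consistent("new_code.py")   # [] when consistent
# check_copies.is_copy_consistent("new_code.py", overwrite=True)  # rewrite in place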
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: every prime strictly below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
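# A quick illustration of the sieve above:
#   calculate_prime_numbers(10) -> [2, 3, 5, 7]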
def solution(base: int = 800800, degree: int = 800800) -> int:
    # Count hybrid integers p**q * q**p (p, q distinct primes) that do not
    # exceed base**degree, comparing log2 weights with a two-pointer sweep.
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 136 | 1 |
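As a sanity check, a small hedged sketch using the two functions above (assuming both are in scope): for the bound 800**1, primes below 10 already cover every candidate pair, so the count can be brute-forced directly.

primes = calculate_prime_numbers(10)
brute_force_count = sum(
    1
    for i, p in enumerate(primes)
    for q in primes[i + 1 :]
    if p**q * q**p <= 800  # only 2**3 * 3**2 = 72 and 2**5 * 5**2 = 800 qualify
)
assert brute_force_count == solution(800, 1) == 2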
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its settings differ from the kwargs.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 359 |
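A minimal usage sketch for the tokenizer above, as a hedged example: it assumes hub access to the `yjernite/retribert-base-uncased` files referenced in the pretrained maps, and that the class is exported from `transformers` under the same name.

from transformers import RetriBertTokenizerFast

tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
enc = tok("How are rainbows formed?", return_tensors="pt")
print(enc["input_ids"], enc["attention_mask"])  # fields per model_input_names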
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
| 257 | 0 |
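A standalone sketch of the helper under test, on a smaller graph (assuming `graphs.minimum_spanning_tree_kruskal` is importable as above; edges are `[u, v, weight]` triples and the result is the MST edge list):

from graphs.minimum_spanning_tree_kruskal import kruskal

mst = kruskal(4, [[0, 1, 3], [1, 2, 5], [2, 3, 1], [0, 3, 10]])
# Expected MST edges, in any order: [2, 3, 1], [0, 1, 3], [1, 2, 5]
print(sorted(mst))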