# --- TPU distributed training launch helper ---
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the command line options for the launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
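# Usage sketch, assuming a hypothetical training script `train_mnist.py` that
# defines the `_mp_fn(index)` entry point expected by `xmp.spawn`:
#
#   python xla_spawn.py --num_cores 8 train_mnist.py --batch_size 32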
# --- Deprecation shim for the Chinese-CLIP feature extractor ---
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# --- Lazy-import structure for the SqueezeBERT model family ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- Configuration class for RWKV models ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both derived sizes default from hidden_size when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
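# Minimal usage sketch (via the public API, since this module uses relative
# imports; the sizes are arbitrary and only exercise the defaults above):
#
#   from transformers import RwkvConfig
#
#   config = RwkvConfig(hidden_size=512, num_hidden_layers=12)
#   config.attention_hidden_size  # 512: defaults to hidden_size
#   config.intermediate_size      # 2048: defaults to 4 * hidden_size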
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,  # (sic: spelled "availble" upstream)
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
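# Usage sketch: example scripts call this guard at import time, e.g.
#
#   check_min_version("4.28.0.dev0")
#
# (the version string here is only illustrative).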
# --- Archive extraction utilities with per-format extractors ---
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out unsafe archive members (path traversal, bad symlinks/hardlinks).
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
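# Minimal usage sketch for the Extractor facade above (assuming this module is
# importable as `datasets.utils.extract`; the paths are hypothetical):
#
#   from datasets.utils.extract import Extractor
#
#   fmt = Extractor.infer_extractor_format("downloads/dump.tar.gz")  # e.g. "gzip"
#   if fmt:
#       Extractor.extract("downloads/dump.tar.gz", "downloads/extracted", extractor_format=fmt)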
# --- Forward (explicit) Euler method for first-order ODEs ---
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) on [x0, x_end] with the forward Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
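# Worked example: integrating dy/dx = y from x=0 to x=1 with y(0)=1
# approximates e. Forward Euler is first-order accurate, so with
# step_size=0.01 the result lands slightly below e ≈ 2.71828.
if __name__ == "__main__":
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.7048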
# --- Bead sort ("gravity sort") for non-negative integers ---
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # Let the surplus beads fall from the taller rod onto the next one.
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
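# Intuition: each element is a rod of beads; every pass lets surplus beads
# "fall" from a taller rod onto its shorter right neighbour, so after at most
# len(sequence) passes the heights are non-decreasing. A small check:
if __name__ == "__main__":
    print(bead_sort([4, 1, 3]))  # [1, 3, 4]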
# --- Test for the Donut processor's token-to-JSON conversion ---
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
# --- Sentence-splitting helper used for ROUGE-Lsum evaluation ---
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char (re.sub returns a new string, so keep the result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
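# Usage sketch: the helper feeds rougeLsum computations, which expect one
# sentence per line.
if __name__ == "__main__":
    print(add_newline_to_end_of_each_sentence("Hello there. General Kenobi."))
    # Hello there.
    # General Kenobi.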
# --- ffmpeg-based audio decoding and microphone streaming helpers ---
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an audio byte payload to a mono float32 array through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Read raw microphone data via the platform's ffmpeg capture device."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks suitable for ASR pipelines."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and does chunks of length `chunk_len`. Optionally adds `stride` to each chunk to
    get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal generator reading `buflen`-sized byte blocks from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
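# Minimal usage sketch for ffmpeg_read above; the input file name is a
# hypothetical placeholder and the system ffmpeg binary must be installed:
if __name__ == "__main__":
    with open("sample.flac", "rb") as f:  # hypothetical audio file
        audio = ffmpeg_read(f.read(), sampling_rate=16000)
    print(audio.dtype, audio.shape)  # float32, (num_samples,)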
# --- Binomial coefficient via a single Pascal's-triangle row ---
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
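# Sanity check: the inner loop applies Pascal's rule
# C(i, j) = C(i-1, j) + C(i-1, j-1) in place, so the result matches math.comb.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252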
# --- Hacker News top stories as a markdown list ---
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
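# Output sketch: each story renders as "* [title](url)". One caveat worth
# noting (an observation, not from the original code): API items without a
# "url" field, such as Ask HN posts, would raise KeyError in the
# .format(**story) call above.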
# --- Conversion script: Dance Diffusion checkpoints to diffusers UNet1DModel ---
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Return a timestep from the scaling factors for the clean signal and the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"


DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
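# Usage sketch (the script file name and output path are illustrative):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers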
# --- Configuration class for Informer time-series models ---
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
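# Minimal usage sketch (via the public API, since this module uses relative
# imports), showing two derived attributes:
#
#   from transformers import InformerConfig
#
#   config = InformerConfig(prediction_length=24)
#   config.context_length  # 24: defaults to prediction_length
#   config.feature_size    # 9: 7 default lags * input_size 1 + 2 loc/scale features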
# --- Lazy-import structure for the ViT-MSN model family ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- Configuration class for Dinat (Dilated Neighborhood Attention Transformer) ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
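# Minimal usage sketch (via the public API, since this module uses relative
# imports), showing the derived attributes:
#
#   from transformers import DinatConfig
#
#   config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   config.hidden_size  # 512: embed_dim * 2 ** (num_stages - 1)
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']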
# --- Tests for TF generation utilities (top-k/top-p filtering, SavedModel export) ---
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)


@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
snake_case_ = complete_model(_UpperCamelCase )
snake_case_ = tf.keras.Model(_UpperCamelCase , _UpperCamelCase )
keras_model.save(_UpperCamelCase )
def snake_case__( self : Any ) ->List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
snake_case_ = 1_4
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = '''Hello, my dog is cute and'''
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''tf''' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case__( self : str ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = '''Hugging Face is a technology company based in New York and Paris.'''
snake_case_ = bart_tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Optional[int] ) ->List[str]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_UpperCamelCase , _UpperCamelCase ) )
class snake_case_ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : Tuple ) ->Optional[Any]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
with self.assertRaises(_UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_UpperCamelCase , foo='''bar''' ) | 39 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Any = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE : List[str] = None
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : List[str]="<unk>" , _UpperCamelCase : Optional[int]="<s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : Dict="<pad>" , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Any=False , **_UpperCamelCase : Tuple , ) ->Any:
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _UpperCamelCase ) != add_prefix_space:
snake_case_ = getattr(_UpperCamelCase , pre_tok_state.pop('''type''' ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**_UpperCamelCase )
snake_case_ = add_prefix_space
def snake_case__( self : Union[str, Any] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[Any] ) ->BatchEncoding:
snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Any , *_UpperCamelCase : Dict , **_UpperCamelCase : Optional[Any] ) ->BatchEncoding:
snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : "Conversation" ) ->List[int]:
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids | 39 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = DonutProcessor.from_pretrained(_UpperCamelCase )
def snake_case__( self : Dict ) ->str:
snake_case_ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
snake_case_ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
snake_case_ = self.processor.tokenajson(_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 1 |
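# A hedged toy helper (not part of the DonutProcessor API): the inverse of the
# token-to-json conversion tested above, turning the dict back into Donut's
# tagged-sequence format.
def dict_to_tokens(obj):
    if isinstance(obj, dict):
        return "".join(f"<s_{key}>{dict_to_tokens(value)}</s_{key}>" for key, value in obj.items())
    if isinstance(obj, list):
        return "<sep/>".join(dict_to_tokens(item) for item in obj)
    return str(obj)  # leaves such as "John Doe" or "99" are emitted verbatim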
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase_ = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Union[str, Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer
def __init__( self : List[str] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int="<|endoftext|>" , _UpperCamelCase : List[Any]="<|endoftext|>" , _UpperCamelCase : Any="<|endoftext|>" , _UpperCamelCase : List[str]=False , **_UpperCamelCase : Tuple , ) ->Optional[Any]:
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = kwargs.pop('''add_bos_token''' , _UpperCamelCase )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _UpperCamelCase ) != add_prefix_space:
snake_case_ = getattr(_UpperCamelCase , pre_tok_state.pop('''type''' ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**_UpperCamelCase )
snake_case_ = add_prefix_space
def snake_case__( self : Tuple , *_UpperCamelCase : List[Any] , **_UpperCamelCase : List[str] ) ->BatchEncoding:
snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : str , *_UpperCamelCase : Any , **_UpperCamelCase : List[str] ) ->BatchEncoding:
snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : "Conversation" ) ->List[int]:
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids | 39 |
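# A hedged usage sketch (downloads the "gpt2" checkpoint on first run): with
# add_prefix_space=True the fast tokenizer accepts pretokenized input, which
# is exactly the combination the assertions above guard.
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc["input_ids"])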
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
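'''Return the arithmetic mean of a non-empty list of numbers.
>>> __SCREAMING_SNAKE_CASE([3, 6, 9, 12, 15, 18, 21])
12.0
>>> __SCREAMING_SNAKE_CASE([])
Traceback (most recent call last):
    ...
ValueError: List is empty
'''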
if not nums:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = XGLMTokenizerFast
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : List[Any] = True
def snake_case__( self : int ) ->Any:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = XGLMTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__( self : Any ) ->Dict:
snake_case_ = '''<pad>'''
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(_UpperCamelCase ) , 1_0_0_8 )
def snake_case__( self : str ) ->int:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def snake_case__( self : int ) ->List[Any]:
snake_case_ = XGLMTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
snake_case_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def snake_case__( self : Any ) ->Union[str, Any]:
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def snake_case__( self : int ) ->int:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_UpperCamelCase , f.name )
snake_case_ = XGLMTokenizer(f.name , keep_accents=_UpperCamelCase )
snake_case_ = pickle.dumps(_UpperCamelCase )
pickle.loads(_UpperCamelCase )
def snake_case__( self : Any ) ->Tuple:
if not self.test_rust_tokenizer:
return
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = '''I was born in 92000, and this is falsé.'''
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
snake_case_ = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
snake_case_ = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(_UpperCamelCase )
snake_case_ = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->Any:
snake_case_ = '''Hello World!'''
snake_case_ = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def snake_case__( self : Union[str, Any] ) ->Optional[Any]:
snake_case_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
snake_case_ = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def snake_case__( self : List[Any] ) ->List[Any]:
# fmt: off
snake_case_ = {
'''input_ids''': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='''facebook/xglm-564M''' , padding=_UpperCamelCase , ) | 39 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : List[str] ) ->str:
snake_case_ = inspect.getfile(accelerate.test_utils )
snake_case_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
snake_case_ = test_metrics
@require_cpu
def snake_case__( self : str ) ->int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case__( self : Union[str, Any] ) ->Any:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case__( self : List[Any] ) ->Tuple:
self.test_metrics.main()
@require_multi_gpu
def snake_case__( self : Any ) ->Union[str, Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) | 39 | 1 |
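# A hedged usage sketch of the patch_environment helper imported above: it
# temporarily sets (upper-cased) environment variables and undoes the change
# when the block exits.
import os
from accelerate.utils import patch_environment

with patch_environment(omp_num_threads=1):
    assert os.environ["OMP_NUM_THREADS"] == "1"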
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = int(SCREAMING_SNAKE_CASE__ )
if n_element < 1:
snake_case_ = ValueError('''n_element should be a positive number''' )
raise my_error
snake_case_ = [1]
snake_case_, snake_case_, snake_case_ = (0, 0, 0)
snake_case_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase_ = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
lowerCAmelCase_ = hamming(int(n))
print('''-----------------------------------------------------''')
print(f"""The list with nth numbers is: {hamming_numbers}""")
print('''-----------------------------------------------------''') | 39 |
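# A hedged sanity check, assuming the generator above is importable as
# `hamming`: the first ten 5-smooth (Hamming) numbers, in order.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]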
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "informer"
SCREAMING_SNAKE_CASE : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Dict , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "student_t" , _UpperCamelCase : str = "nll" , _UpperCamelCase : int = 1 , _UpperCamelCase : List[int] = None , _UpperCamelCase : Optional[Union[str, bool]] = "mean" , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : int = 6_4 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : bool = True , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.05 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 1_0_0 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : Dict=True , _UpperCamelCase : str = "prob" , _UpperCamelCase : int = 5 , _UpperCamelCase : bool = True , **_UpperCamelCase : Optional[Any] , ) ->Optional[int]:
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Informer
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def snake_case__( self : Optional[Any] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 39 | 1 |
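# A hedged usage sketch: a deliberately tiny configuration built from the
# signature above; the values are illustrative, not tuned hyperparameters.
from transformers import InformerConfig

config = InformerConfig(
    prediction_length=24,
    context_length=48,
    input_size=1,
    lags_sequence=[1, 2, 3, 7],
    num_time_features=2,
    d_model=32,
)
print(config.attention_type)  # "prob", i.e. Informer's ProbSparse attention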
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Initialise PyTorch model
snake_case_ = RemBertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print('''Building PyTorch model from configuration: {}'''.format(str(SCREAMING_SNAKE_CASE__ ) ) )
snake_case_ = RemBertModel(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(SCREAMING_SNAKE_CASE__ ) )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 39 |
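# A hedged invocation sketch for the conversion script above (every path
# below is a placeholder; the flags match the argparse definitions):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin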
import cmath
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
# Convert voltage and current to rectangular form
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 1 |
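# A hedged usage sketch, assuming the function above is exposed as
# `apparent_power(voltage, current, voltage_angle, current_angle)` with both
# angles in degrees: a purely resistive 100 V / 5 A load yields 500 VA, and a
# 90-degree voltage shift rotates the power onto the reactive axis.
import cmath

assert cmath.isclose(apparent_power(100, 5, 0, 0), 500 + 0j)
assert cmath.isclose(apparent_power(100, 5, 90, 0), 500j)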
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase_ = '''sshleifer/bart-tiny-random'''
lowerCAmelCase_ = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__( self : List[str] ) ->Union[str, Any]:
return AutoConfig.from_pretrained(_UpperCamelCase )
def snake_case__( self : List[str] ) ->str:
snake_case_, *snake_case_ = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def snake_case__( self : List[Any] ) ->Any:
snake_case_, *snake_case_ = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=_UpperCamelCase )
def snake_case__( self : List[str] ) ->str:
snake_case_, *snake_case_ = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=_UpperCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def snake_case__( self : List[Any] ) ->int:
snake_case_, *snake_case_ = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def snake_case__( self : Tuple ) ->Any:
with self.assertRaises(_UpperCamelCase ):
create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=_UpperCamelCase , d=_UpperCamelCase ) | 39 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=1_3 , _UpperCamelCase : str=7 , _UpperCamelCase : int=True , _UpperCamelCase : Dict=True , _UpperCamelCase : int=False , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=9_9 , _UpperCamelCase : str=3_2 , _UpperCamelCase : str=5 , _UpperCamelCase : str=4 , _UpperCamelCase : int=3_7 , _UpperCamelCase : int="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : str=5_1_2 , _UpperCamelCase : Optional[int]=1_6 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : Any=0.02 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : str=None , ) ->Dict:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def snake_case__( self : str ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] ) ->Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = BioGptModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = '''left'''
# Define PAD Token = EOS Token (BioGPT has no dedicated padding token)
snake_case_ = tokenizer.eos_token
snake_case_ = model.config.eos_token_id
# use different length sentences to test batching
snake_case_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
snake_case_ = inputs['''input_ids'''].to(_UpperCamelCase )
snake_case_ = model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
snake_case_ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase )
snake_case_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
snake_case_ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 1 |
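# A hedged usage sketch (downloads "microsoft/biogpt", the same checkpoint the
# integration tests above exercise) via the text-generation pipeline:
from transformers import pipeline

generator = pipeline("text-generation", model="microsoft/biogpt")
print(generator("COVID-19 is", max_length=20, num_return_sequences=1))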
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCAmelCase_ = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
snake_case_ = self.transformer_dir
shutil.copy(
os.path.join(_UpperCamelCase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def snake_case__( self : Dict ) ->Tuple:
snake_case_ = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : int=None ) ->List[str]:
snake_case_ = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
snake_case_ = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
snake_case_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
snake_case_ = black.format_str(_UpperCamelCase , mode=_UpperCamelCase )
snake_case_ = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(_UpperCamelCase , '''w''' , newline='''\n''' ) as f:
f.write(_UpperCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCamelCase )
with open(_UpperCamelCase , '''r''' ) as f:
self.assertTrue(f.read() , _UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->str:
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , _UpperCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , _UpperCamelCase ) , )
# Copy consistency with a really long name
snake_case_ = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , f'''{long_class_name}LMPredictionHead''' , re.sub('''Bert''' , _UpperCamelCase , _UpperCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , _UpperCamelCase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , _UpperCamelCase ) , )
def snake_case__( self : Optional[int] ) ->int:
snake_case_ = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
snake_case_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md, converted_localized_md_list)

        num_models_equal, converted_md = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_entry = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        localized_md_entry = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        expected_localized_md_entry = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md = check_copies.convert_to_localized_md(
            md_entry, localized_md_entry, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md, expected_localized_md_entry)
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f(x0) + 2*f(x1) + ... + 2*f(x_{n-1}) + f(xn))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # lower bound of integration
    b = 1.0  # upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
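
# Quick sanity check (hedged, assuming the defaults above): for f(x) = x**2 on
# [0, 1] with steps = 10.0, method_1 prints y ~= 0.335, close to the exact
# integral 1/3. The interior points from make_points are 0.1, 0.2, ..., 0.9 up
# to floating-point rounding in the repeated addition.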
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
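
# Endpoint intuition (follows directly from atan2): t == 0.0 when
# (alpha, sigma) == (1, 0), i.e. no noise, and t == 1.0 when
# (alpha, sigma) == (0, 1), i.e. pure noise, so timesteps stay in [0, 1].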
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
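
# Note: download() shells out to wget, so it needs the `wget` binary on PATH and
# writes the checkpoint into the current working directory, e.g.
# download("gwf-440k") -> "./gwf-440k.ckpt".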
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
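
# Examples (derivable from the mapping above):
#   convert_resconv_naming("skip.weight")   -> "conv_skip.weight"
#   convert_resconv_naming("main.0.weight") -> "conv_1.weight"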
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
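
# Example (the early-return case): rename("timestep_embed.weight") returns
# "time_proj.weight"; every other key is routed through the depth bookkeeping
# and the *_NUM_TO_LAYER tables above.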
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
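
# Note on the fused projection: a qkv weight of shape (3*C, ...) is split
# row-wise into three (C, ...) chunks (query, key, value), one per name that
# convert_attn_naming produced for "qkv_proj".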
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args) | 39 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
return ua
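
# Illustrative output (exact fields depend on the installed libraries):
# http_user_agent({"pipeline": "text-to-image"}) returns a string like
# "diffusers/<ver>; python/<ver>; session_id/<hex>; torch/<ver>; pipeline/text-to-image"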
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
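
# e.g. get_full_repo_name("my-model", organization="my-org") -> "my-org/my-model";
# without an organization, the username is resolved from the locally stored HF token.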
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
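
# Illustrative (hypothetical path): for a cached file such as
# ".../models--foo--bar/snapshots/<sha>/unet/config.json" the captured group is
# the snapshot folder name, returned only if it matches REGEX_COMMIT_HASH.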
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
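
# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
# and _add_variant("diffusion_pytorch_model.bin") -> "diffusion_pytorch_model.bin" (unchanged)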
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
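
# Minimal usage sketch (hedged; relies only on the class defined above):
#
#   config = DPTConfig(is_hybrid=True)
#   assert config.to_dict()["model_type"] == "dpt"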
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
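
# Behavior note: instances act exactly like DonutImageProcessor; the subclass
# only adds the deprecation warning at construction time.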
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)

    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
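
# Hypothetical invocation (all values below are illustrative, not from the
# original script); extra keyword arguments override matching fields on `args`:
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="train.csv",
#       infer_file="infer.csv",
#       output_dir="self_training_output",
#       eval_file="eval.csv",
#       evaluation_strategy="epoch",
#       max_selftrain_iterations=3,
#   )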
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)

        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def snake_case__( self : str , device , seed=0 ) ->Any:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Dict ) ->List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : List[str] ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__( self : Dict ) ->Any:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''A photo of an astronaut'''
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : int ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[str] ) ->Tuple:
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''numpy''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 39 | 1 |
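# Not executed by the tests above; a minimal end-to-end usage sketch for the pipeline they
# cover, assuming a CUDA device and network access to the public "BAAI/AltDiffusion" checkpoint.
def _alt_diffusion_usage_sketch():
    pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    # One denoising run; fewer steps trade quality for speed
    image = pipe("A painting of a squirrel eating a burger", num_inference_steps=25).images[0]
    image.save("squirrel.png")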
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
    def __init__( self : Optional[int] , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) ->Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def snake_case__( self : str ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] ) ->Tuple:
return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] ) ->int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
    def snake_case__( self : List[str] ) ->Union[str, Any]:
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        tokenizer.padding_side = '''left'''
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        inputs = tokenizer(sentences , return_tensors='''pt''' , padding=True )
        input_ids = inputs['''input_ids'''].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs['''attention_mask'''].to(torch_device ) , )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
@slow
    def snake_case__( self : Optional[int] ) ->List[str]:
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def snake_case__( self : Optional[int] ) ->str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = inputs_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def snake_case__( self : str ) ->str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = inputs_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_UpperCamelCase , _UpperCamelCase ) | 39 |
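# Not executed by the tests above; a minimal beam-search generation sketch, assuming network
# access to the public "microsoft/biogpt" checkpoint and a working torch install.
def _biogpt_generation_sketch():
    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
    inputs = tokenizer("COVID-19 is", return_tensors="pt")
    # Beam search mirrors the settings of the slow test above, with a shorter max_length
    output_ids = model.generate(**inputs, max_length=50, num_beams=5, early_stopping=True)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)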
from math import factorial
def combinations(n , k ):
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
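# The function computes the binomial coefficient C(n, k) = n! / (k! * (n - k)!).
# Two quick sanity checks: C(5, 2) = 10 and C(52, 5) = 2,598,960 (the poker-hand count printed below).
assert combinations(5, 2) == 10
assert combinations(52, 5) == 2_598_960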
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
) | 39 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith('''.pt''' ):
        args.output = args.output + '''.pt'''
    new_state = OrderedDict()
    with tf.device('''/CPU:0''' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
snake_case_ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
snake_case_ = 8
                snake_case_ = '''model.sqout.%d.weight''' % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/moe''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/softmlp/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
snake_case_ = key_name[-9:-7]
for i in range(16 ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
snake_case_ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/mlp''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p1/bias''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p2/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p2/bias''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/ln''' ):
snake_case_ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
snake_case_ = '''model.blocks.%d.feed_forward.norm.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/g''' ):
snake_case_ = '''model.blocks.%d.feed_forward.norm.weight''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/att''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
snake_case_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ = state[:, 0, :, :]
snake_case_ = state[:, 1, :, :]
snake_case_ = state[:, 2, :, :]
snake_case_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
snake_case_ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
snake_case_ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/o/kernel''' ):
snake_case_ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
snake_case_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/an''' ):
snake_case_ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
snake_case_ = '''model.blocks.%d.self_attn.norm.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/g''' ):
snake_case_ = '''model.blocks.%d.self_attn.norm.weight''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
snake_case_ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
snake_case_ = '''model.%s.weight''' % nlayer
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
if key_name.startswith('''model/wte''' ):
snake_case_ = '''lm_head.weight'''
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/wob''' ):
snake_case_ = '''final_logits_bias'''
snake_case_ = vnp.copy() # same in embedded
snake_case_ = state.reshape((1, -1) )
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name == "model/dense/kernel":
snake_case_ = '''model.last_project.weight'''
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name == "model/dense_1/bias":
snake_case_ = '''model.last_project.bias'''
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
    torch.save(new_state , args.output )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 39 |
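# Example invocation (hypothetical paths; the flag names match the parser defined above):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan.pt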
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
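# `list_field` exists because dataclasses reject mutable defaults such as `[]` (one shared list
# would be aliased across every instance); wrapping the default in `default_factory` gives each
# instance a fresh copy. A bare `items: list = []` inside a @dataclass raises a ValueError
# pointing at `default_factory` instead.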
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : bool
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 42
SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : Optional[bool] = None
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "titi"
SCREAMING_SNAKE_CASE : Any = "toto"
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = "titi"
SCREAMING_SNAKE_CASE : Optional[Any] = "toto"
SCREAMING_SNAKE_CASE : Any = 42
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def snake_case__( self : Tuple ) ->List[str]:
snake_case_ = BasicEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = field()
SCREAMING_SNAKE_CASE : str = field()
SCREAMING_SNAKE_CASE : BasicEnum = field()
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : "BasicEnum" = field()
SCREAMING_SNAKE_CASE : "Optional[bool]" = None
SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int | None = None
SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : str | None = None
SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
    def snake_case__( self : str ) ->Tuple:
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 1_2,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(args_dict , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
            args = BasicExample(**args_dict )
            self.assertEqual(parsed_args , args )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase ) | 39 | 1 |
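# Not executed by the tests above; a minimal sketch of the public API they exercise, using a
# hypothetical dataclass of script arguments.
def _hf_argparser_usage_sketch():
    @dataclass
    class ScriptArguments:
        learning_rate: float = 3e-5
        output_dir: str = "out"
    parser = HfArgumentParser(ScriptArguments )
    (script_args,) = parser.parse_args_into_dataclasses(['''--learning_rate''', '''1e-4'''] )
    return script_args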
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_mobilenet_va''': [
        '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''MobileNetVaConfig''',
        '''MobileNetVaOnnxConfig''',
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_mobilenet_va'''] = ['''MobileNetVaFeatureExtractor''']
    _import_structure['''image_processing_mobilenet_va'''] = ['''MobileNetVaImageProcessor''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_mobilenet_va'''] = [
        '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileNetVaForImageClassification''',
        '''MobileNetVaForSemanticSegmentation''',
        '''MobileNetVaModel''',
        '''MobileNetVaPreTrainedModel''',
        '''load_tf_weights_in_mobilenet_va''',
    ]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
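# `_LazyModule` defers the heavy submodule imports above until an attribute is first touched,
# keeping the top-level package import cheap. A stripped-down sketch of the idea (hypothetical,
# not the real implementation):
#   class TinyLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(module, attr)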
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE : int = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE : List[str] = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE : Any = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE : Tuple = False
@property
def snake_case__( self : Optional[Any] ) ->Tuple:
return 3_2
@property
def snake_case__( self : Dict ) ->Tuple:
return 3_2
@property
def snake_case__( self : List[str] ) ->Dict:
return self.time_input_dim
@property
def snake_case__( self : Optional[Any] ) ->List[str]:
return self.time_input_dim * 4
@property
def snake_case__( self : Any ) ->Optional[Any]:
return 1_0_0
@property
def snake_case__( self : Union[str, Any] ) ->str:
torch.manual_seed(0 )
snake_case_ = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case_ = UNetaDConditionModel(**_UpperCamelCase )
return model
@property
def snake_case__( self : Any ) ->Optional[int]:
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case__( self : int ) ->int:
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case__( self : Optional[Any] ) ->Union[str, Any]:
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
snake_case_ = DDIMScheduler(**_UpperCamelCase )
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def snake_case__( self : Dict , device , seed=0 ) ->List[str]:
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
        # create hint
        hint = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def snake_case__( self : Tuple ) ->Optional[Any]:
snake_case_ = '''cpu'''
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
snake_case_ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Tuple ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
snake_case_ = init_image.resize((5_1_2, 5_1_2) )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
snake_case_ = torch.from_numpy(np.array(_UpperCamelCase ) ).float() / 255.0
snake_case_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case_ = '''A robot, 4k photo'''
snake_case_ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCamelCase )
snake_case_ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case_, snake_case_ = pipe_prior(
_UpperCamelCase , image=_UpperCamelCase , strength=0.85 , generator=_UpperCamelCase , negative_prompt='''''' , ).to_tuple()
snake_case_ = pipeline(
image=_UpperCamelCase , image_embeds=_UpperCamelCase , negative_image_embeds=_UpperCamelCase , hint=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase ) | 39 |
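# Optional memory savers for the slow pipelines above (a sketch; both are public diffusers
# APIs, but availability depends on the installed version, and CPU offload additionally
# requires `accelerate`):
#   pipeline.enable_attention_slicing()    # lower peak VRAM at some speed cost
#   pipeline.enable_model_cpu_offload()    # stream submodules to the GPU on demand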
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "rwkv"
SCREAMING_SNAKE_CASE : Any = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , _UpperCamelCase : Any=5_0_2_7_7 , _UpperCamelCase : Optional[int]=1_0_2_4 , _UpperCamelCase : Optional[int]=4_0_9_6 , _UpperCamelCase : str=3_2 , _UpperCamelCase : Tuple=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=1e-5 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=0 , _UpperCamelCase : int=6 , _UpperCamelCase : Dict=False , _UpperCamelCase : Optional[int]=True , **_UpperCamelCase : int , ) ->List[str]:
snake_case_ = vocab_size
snake_case_ = context_length
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case_ = layer_norm_epsilon
snake_case_ = rescale_every
snake_case_ = use_cache
snake_case_ = bos_token_id
snake_case_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 39 | 1 |
import re
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
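# Hedged usage sketch (comments only: the def above was renamed by the code
# obfuscation, while __main__ below keeps the readable name is_sri_lankan_phone_number):
#     is_sri_lankan_phone_number('''+94773283048''')  # True: +94 prefix, 7, allowed digit, 7 more digits
#     is_sri_lankan_phone_number('''0912343221''')    # False: the block after the prefix must start with 7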
if __name__ == "__main__":
lowerCAmelCase_ = '''0094702343221'''
print(is_sri_lankan_phone_number(phone)) | 39 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
class snake_case_ :
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : Optional[str] = None ) ->Tuple:
snake_case_ = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case_ = Extractor
def snake_case__( self : Any , _UpperCamelCase : str ) ->str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case_ = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : bool ) ->bool:
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : bool = False ) ->str:
snake_case_ = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
snake_case_ = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : str ) ->bool:
...
@staticmethod
@abstractmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
...
class snake_case_ ( __A , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[bytes] = []
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->List[Any]:
with open(_UpperCamelCase , '''rb''' ) as f:
return f.read(_UpperCamelCase )
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if not magic_number:
snake_case_ = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
snake_case_ = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : Any ) ->bool:
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def snake_case__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) ->List[str]:
def resolved(_UpperCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_UpperCamelCase ) )
def badpath(_UpperCamelCase : str , _UpperCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase )
def badlink(_UpperCamelCase : Tuple , _UpperCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case_ = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCamelCase )
snake_case_ = resolved(_UpperCamelCase )
for finfo in members:
if badpath(finfo.name , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [b"\x1F\x8B"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with gzip.open(_UpperCamelCase , '''rb''' ) as gzip_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def snake_case__( cls : List[str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , '''rb''' ) as fp:
snake_case_ = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case_ = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
snake_case_ = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
snake_case_ = zstd.ZstdDecompressor()
with open(_UpperCamelCase , '''rb''' ) as ifh, open(_UpperCamelCase , '''wb''' ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"\x42\x5A\x68"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with bza.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import pyazr
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with pyazr.SevenZipFile(_UpperCamelCase , '''r''' ) as archive:
archive.extractall(_UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x04\x22\x4D\x18"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lza.frame
with lza.frame.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case__( cls : List[Any] ) ->List[str]:
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def snake_case__( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bool = False ) ->bool:
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case__( cls : int , _UpperCamelCase : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
snake_case_ = cls._get_magic_number_max_length()
snake_case_ = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ) ->None:
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
snake_case_ = str(Path(_UpperCamelCase ).with_suffix('''.lock''' ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = extractor if extractor != '''deprecated''' else extractor_format
else:
snake_case_ = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
return extractor.extract(_UpperCamelCase , _UpperCamelCase ) | 39 | 1 |
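# Hedged usage sketch (comments only; the obfuscated class names above collide,
# so this mirrors the upstream `datasets` equivalents, which is an assumption):
#     fmt = Extractor.infer_extractor_format('''archive.tar.gz''')  # e.g. '''gzip''', matched by magic number
#     Extractor.extract('''archive.tar.gz''', '''out_dir''', extractor_format=fmt)
# Extraction is serialized through a FileLock on '''<output>.lock''' so parallel
# callers never unpack into the same directory at once.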
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for attribute in key.split('''.''' ):
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
snake_case_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
else:
snake_case_ = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == '''group''' , )
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(SCREAMING_SNAKE_CASE__ )[0].split('''.''' )[-2]
snake_case_ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
snake_case_ = '''weight_g'''
elif "weight_v" in name:
snake_case_ = '''weight_v'''
elif "weight" in name:
snake_case_ = '''weight'''
elif "bias" in name:
snake_case_ = '''bias'''
else:
snake_case_ = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = full_name.split('''conv_layers.''' )[-1]
snake_case_ = name.split('''.''' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True ):
if config_path is not None:
snake_case_ = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = HubertConfig()
if is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(SCREAMING_SNAKE_CASE__ )
# important: change the bos & pad token ids since the CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE__ )
snake_case_ = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE__ , )
snake_case_ = True if config.feat_extract_norm == '''layer''' else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
snake_case_ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case_ = HubertForCTC(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = HubertModel(SCREAMING_SNAKE_CASE__ )
if is_finetuned:
snake_case_, snake_case_, snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
snake_case_, snake_case_, snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case_ = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 39 |
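# Hedged invocation sketch (the script filename and checkpoint name are
# assumptions, not taken from the source above; the flags match the argparse setup):
#     python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./hubert_base_ls960.pt \
#         --pytorch_dump_folder_path ./hubert-base \
#         --not_finetuned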
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if any(not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(SCREAMING_SNAKE_CASE__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
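# Hedged worked example (not in the original): one outer pass over [3, 1]
# compares the pair (3, 1); since 3 > 1, two "beads" fall from the upper rod
# to the lower one, giving [1, 3]. Repeating the pass len(sequence) times
# guarantees every bead has settled, so the list ends up sorted ascending.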
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 39 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = XLMRobertaTokenizer
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : List[str] = True
def snake_case__( self : Any ) ->Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = XLMRobertaTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__( self : Optional[int] ) ->int:
snake_case_ = '''<pad>'''
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def snake_case__( self : str ) ->Optional[Any]:
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_UpperCamelCase ) , 1_0_0_2 )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def snake_case__( self : int ) ->Any:
snake_case_ = XLMRobertaTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
snake_case_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__( self : Tuple ) ->Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase )
snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase )
snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=True
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase )
snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it saves with the same files
self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase )
snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=False
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase )
snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase )
snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
@cached_property
def snake_case__( self : int ) ->List[str]:
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def snake_case__( self : Any ) ->Union[str, Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_UpperCamelCase , f.name )
snake_case_ = XLMRobertaTokenizer(f.name , keep_accents=_UpperCamelCase )
snake_case_ = pickle.dumps(_UpperCamelCase )
pickle.loads(_UpperCamelCase )
def snake_case__( self : Any ) ->Optional[int]:
if not self.test_rust_tokenizer:
return
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = '''I was born in 92000, and this is falsé.'''
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
snake_case_ = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
snake_case_ = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(_UpperCamelCase )
snake_case_ = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def snake_case__( self : Optional[Any] ) ->str:
snake_case_ = '''Hello World!'''
snake_case_ = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def snake_case__( self : str ) ->Tuple:
snake_case_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
snake_case_ = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def snake_case__( self : Dict ) ->List[str]:
# fmt: off
snake_case_ = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , ) | 39 |
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(snake_case_ ) ) | 39 | 1 |
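# Hedged usage sketch (the exact split is an assumption about nltk's punkt model):
#     __SCREAMING_SNAKE_CASE('''Hello world. How are you?''')
#     # -> '''Hello world.\nHow are you?'''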
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TransfoXLTokenizer
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
def snake_case__( self : List[str] ) ->List[Any]:
super().setUp()
snake_case_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def snake_case__( self : List[Any] , **_UpperCamelCase : Optional[Any] ) ->Dict:
snake_case_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict ) ->Tuple:
snake_case_ = '''<unk> UNwanted , running'''
snake_case_ = '''<unk> unwanted, running'''
return input_text, output_text
def snake_case__( self : List[Any] ) ->str:
snake_case_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_UpperCamelCase )
snake_case_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(_UpperCamelCase , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [0, 4, 8, 7] )
def snake_case__( self : Dict ) ->int:
snake_case_ = TransfoXLTokenizer(lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def snake_case__( self : Dict ) ->int:
snake_case_ = TransfoXLTokenizer(lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case__( self : List[Any] ) ->Tuple:
snake_case_ = TransfoXLTokenizer(lower_case=_UpperCamelCase )
snake_case_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
snake_case_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(_UpperCamelCase ) , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = self.get_tokenizer()
snake_case_ = len(_UpperCamelCase )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_UpperCamelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' ) | 39 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = [0 for i in range(r + 1 )]
# nc0 = 1
snake_case_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
snake_case_ = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
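# Hedged worked example (comment only; the def above was renamed by the
# obfuscation while the print below keeps the readable name): the in-place
# right-to-left update applies Pascal's rule C(i, j) = C(i-1, j) + C(i-1, j-1)
# with O(r) memory, so binomial_coefficient(n=10, r=5) evaluates to C(10, 5) = 252.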
print(binomial_coefficient(n=10, r=5)) | 39 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output | 39 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return torch.atana(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / math.pi * 2
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.sin(t * math.pi / 2 ) ** 2
snake_case_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
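# Hedged note on the two helpers above (naming follows the v-diffusion
# "crash" schedule, which is an assumption): alpha_sigma_to_t recovers the
# timestep as t = (2 / pi) * atan2(sigma, alpha) (torch.atana being the
# obfuscated atan2), and get_crash_schedule sets sigma = sin(t * pi / 2) ** 2,
# alpha = sqrt(1 - sigma ** 2) before mapping the pair back to t.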
class snake_case_ ( __A ):
'''simple docstring'''
pass
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int ) ->Optional[int]:
super().__init__()
snake_case_ = DiffusionAttnUnetaD(_UpperCamelCase , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
for key, value in ATTN_MAP.items():
if name.startswith(SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif name.startswith(SCREAMING_SNAKE_CASE__ ):
return [name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ):
snake_case_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
snake_case_ = 0
if string.startswith('''net.3.''' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('''net.''' ):
snake_case_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('''main.''' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = '''mid_block'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
snake_case_ = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
snake_case_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
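# Hedged illustrative mapping (worked through the branches above, not taken
# from the original source): the key '''net.3.main.7.3.main.0.weight'''
# descends to depth 2, reads layer number 3 -> '''resnets.1''' (a down block,
# since 3 < 7), and the ResConvBlock tail '''main.0''' -> '''conv_1''',
# yielding '''down_blocks.2.resnets.1.conv_1.weight'''.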
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers don't have trainable weights
continue
snake_case_ = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = v
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
main(args) | 39 | 1 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = "" ):
snake_case_ = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
snake_case_ = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , '''html.parser''' )
snake_case_ = soup.find_all('''td''' , attrs='''titleColumn''' )
snake_case_ = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
}
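# Hedged usage sketch (titles and ratings are assumptions; the live chart
# changes over time):
#     get_imdb_top_aaa_movies()
#     # -> {'''The Shawshank Redemption''': 9.2, '''The Godfather''': 9.2, ...}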
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = "IMDb_Top_250_Movies.csv" ):
snake_case_ = get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''''' ) as out_file:
snake_case_ = csv.writer(SCREAMING_SNAKE_CASE__ )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies() | 39 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
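# --- Hedged editor's sketch of the idea behind ``_LazyModule`` above: defer the
# real import until an attribute is first touched, here via PEP 562 module-level
# ``__getattr__``. This is a simplification, not the transformers implementation. ---
import importlib

_lazy_import_structure = {'''json''': ['''dumps''', '''loads''']}

def __getattr__(name ):
    for module_name, symbols in _lazy_import_structure.items():
        if name in symbols:
            # import the backing module only on first access
            return getattr(importlib.import_module(module_name ) , name )
    raise AttributeError(f'''module {__name__!r} has no attribute {name!r}''' )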
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product = "laptop" ):
    url = f'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                product_rating = '''Not available'''
try:
                product_mrp = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
                product_mrp = ''''''
try:
                discount = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
                discount = float('''nan''' )
except AttributeError:
pass
            data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case_ = ''' '''
snake_case_ = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCAmelCase_ = '''headphones'''
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""") | 39 |
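# --- Hedged editor's sketch of the discount arithmetic above: strip the rupee
# sign and thousands separators, then compute the percentage saved off MRP. ---
def discount_percent(mrp , price ):
    mrp_value = float(mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
    price_value = float(price.strip('''₹''' ).replace(''',''' , '''''' ) )
    return (mrp_value - price_value) / mrp_value * 100

assert round(discount_percent('''₹1,000''' , '''₹750''' ) , 2 ) == 25.0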
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(_UpperCamelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('''inf''' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(_UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
SCREAMING_SNAKE_CASE : Optional[int] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def snake_case__( self : List[Any] ) ->Optional[int]:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 2
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_0_2, 1_0_3]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(_UpperCamelCase ) + 1 ):
snake_case_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->int:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 1
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Any ) ->List[str]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) ->Optional[int]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_0_2, 1_0_3]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for input_row in range(len(_UpperCamelCase ) ):
snake_case_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_text
def snake_case__( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_UpperCamelCase )
class snake_case_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ) ->List[Any]:
super().__init__()
snake_case_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_UpperCamelCase , '''spiece.model''' ) , '''rb''' ).read() )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.tokenizer.tokenize(_UpperCamelCase )
snake_case_, snake_case_ = text.pad_model_inputs(
_UpperCamelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
return self.tokenizer.detokenize(_UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
snake_case_ = complete_model(_UpperCamelCase )
snake_case_ = tf.keras.Model(_UpperCamelCase , _UpperCamelCase )
keras_model.save(_UpperCamelCase )
def snake_case__( self : Any ) ->List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
snake_case_ = 1_4
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = '''Hello, my dog is cute and'''
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''tf''' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case__( self : str ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = '''Hugging Face is a technology company based in New York and Paris.'''
snake_case_ = bart_tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Optional[int] ) ->List[str]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_UpperCamelCase , _UpperCamelCase ) )
class snake_case_ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : Tuple ) ->Optional[Any]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
with self.assertRaises(_UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_UpperCamelCase , foo='''bar''' ) | 39 | 1 |
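# --- Hedged editor's sketch of the nucleus (top-p) filtering idea exercised by
# ``tf_top_k_top_p_filtering`` above, written in plain numpy. This simplified cut
# keeps tokens while cumulative probability stays within ``top_p``; the real
# implementation also handles top-k, batching, and ``min_tokens_to_keep``. ---
import numpy as np

def top_p_mask(logits , top_p ):
    order = np.argsort(logits )[::-1]                       # most likely first
    probs = np.exp(logits[order] ) / np.exp(logits[order] ).sum()
    keep = np.cumsum(probs ) <= top_p
    keep[0] = True                                          # always keep the best token
    masked = np.full_like(logits , -np.inf )
    masked[order[keep]] = logits[order[keep]]
    return masked

masked = top_p_mask(np.array([2.0, 1.0, 0.1, -1.0] ) , top_p=0.8 )
assert np.isinf(masked ).sum() == 3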
import cmath
import math
def apparent_power(voltage , current , voltage_angle , current_angle ):
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle )
    current_angle = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle )
    current_rect = cmath.rect(current , current_angle )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 |
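# --- Hedged editor's usage sketch for ``apparent_power`` above. Note the
# function multiplies the rectangular forms directly (not V times the conjugate
# of I), but either way the magnitude satisfies |S| = |V| * |I|. ---
import math

s = apparent_power(100 , 5 , 30 , -10 )  # 100 V at 30 deg, 5 A at -10 deg
assert math.isclose(abs(s ) , 500.0 )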
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self : Union[str, Any] ) ->Any:
        self.processor = DonutProcessor.from_pretrained(lowerCAmelCase_ )
    def test_tokenajson( self : Dict ) ->str:
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
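# --- Hedged editor's sketch: a toy parser for the flat ``<s_key>value</s_key>``
# markup exercised above. Donut's real ``token2json`` also handles nesting and
# <sep/>-separated lists; this regex only covers one flat level. ---
import re

def flat_token_to_json(sequence ):
    return {key: value for key, value in re.findall(r'''<s_(.+?)>(.*?)</s_\1>''' , sequence )}

assert flat_token_to_json('''<s_name>John Doe</s_name><s_age>99</s_age>''' ) == {
    '''name''': '''John Doe''',
    '''age''': '''99''',
}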
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class snake_case_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[int] = False
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict=False ) ->List[str]:
snake_case_ = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
snake_case_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=1_3 , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=True , _UpperCamelCase : Any=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : str=9_9 , _UpperCamelCase : Tuple=3_2 , _UpperCamelCase : int=3_2 , _UpperCamelCase : int=2 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Optional[int]=3_7 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : int=5_1_2 , _UpperCamelCase : Any=1_6 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : Any=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : str=None , ) ->List[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = embedding_size
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) ->Optional[Any]:
snake_case_ = TFMobileBertModel(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(_UpperCamelCase )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__( self : Dict , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] ) ->List[str]:
snake_case_ = TFMobileBertForMaskedLM(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->Optional[Any]:
snake_case_ = TFMobileBertForNextSentencePrediction(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) ->List[Any]:
snake_case_ = TFMobileBertForPreTraining(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def snake_case__( self : int , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) ->Any:
snake_case_ = self.num_labels
snake_case_ = TFMobileBertForSequenceClassification(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : str ) ->Union[str, Any]:
snake_case_ = self.num_choices
snake_case_ = TFMobileBertForMultipleChoice(config=_UpperCamelCase )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->str:
snake_case_ = self.num_labels
snake_case_ = TFMobileBertForTokenClassification(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->str:
snake_case_ = TFMobileBertForQuestionAnswering(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__( self : Union[str, Any] ) ->List[str]:
snake_case_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def snake_case__( self : Tuple ) ->Optional[Any]:
snake_case_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : Optional[Any] ) ->Any:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCamelCase )
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCamelCase )
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : List[Any] ) ->int:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
snake_case_ = TFMobileBertModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) | 39 |
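# --- Hedged editor's sketch of the ``ids_tensor`` helper the tester above
# depends on: a deterministic random tensor of token ids. The real helper in
# transformers' test utilities differs in details. ---
import tensorflow as tf

def ids_tensor_sketch(shape , vocab_size , seed=0 ):
    return tf.random.stateless_uniform(shape , seed=[seed, 0] , minval=0 , maxval=vocab_size , dtype=tf.int32 )

assert ids_tensor_sketch([2, 7] , vocab_size=99 ).shape == (2, 7)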
from __future__ import annotations
def mean(nums ):
    if not nums:
        raise ValueError('''List is empty''' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 1 |
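# --- Hedged editor's usage note for ``mean`` above: the explicit guard turns
# the empty-input case into a ``ValueError`` instead of a ``ZeroDivisionError``. ---
assert mean([12.0, 14.0, 16.0] ) == 14.0
try:
    mean([] )
except ValueError:
    pass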
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_b = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_b.n_shards == 2
    expected_row_ids_and_row_dicts_b = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_b ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_b[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100 | 39 |
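# --- Hedged editor's sketch of the example-id convention asserted in the tests
# above: ids are "<partition>_<row index within partition>". ---
def expected_shard_ids(partition_order , rows_per_partition ):
    return [f'''{p}_{i}''' for p in partition_order for i in range(rows_per_partition )]

assert expected_shard_ids([1, 0] , 2 ) == ['''1_0''', '''1_1''', '''0_0''', '''0_1''']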
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self : List[str] ) ->str:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def snake_case__( self : str ) ->int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case__( self : Union[str, Any] ) ->Any:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case__( self : List[Any] ) ->Tuple:
self.test_metrics.main()
@require_multi_gpu
def snake_case__( self : Any ) ->Union[str, Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) | 39 | 1 |
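# --- Hedged editor's sketch of the ``patch_environment`` idea used above:
# temporarily set environment variables, then restore them afterwards. The real
# accelerate helper additionally upper-cases the keyword names. ---
import contextlib
import os

@contextlib.contextmanager
def patch_env(**kwargs ):
    old = {key: os.environ.get(key ) for key in kwargs}
    os.environ.update({key: str(value ) for key, value in kwargs.items()} )
    try:
        yield
    finally:
        for key, value in old.items():
            if value is None:
                os.environ.pop(key , None )
            else:
                os.environ[key] = value

with patch_env(OMP_NUM_THREADS=1 ):
    assert os.environ['''OMP_NUM_THREADS'''] == '''1'''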
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : List[Any]=4_0_0 , _UpperCamelCase : Optional[Any]=2_0_0_0 , _UpperCamelCase : Dict=1 , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Tuple=1_6_0_0_0 , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Union[str, Any]=8_0 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=6_4 , _UpperCamelCase : Tuple="hann_window" , _UpperCamelCase : Dict=8_0 , _UpperCamelCase : int=7_6_0_0 , _UpperCamelCase : Union[str, Any]=1e-10 , _UpperCamelCase : str=True , ) ->Optional[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = min_seq_length
snake_case_ = max_seq_length
snake_case_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case_ = feature_size
snake_case_ = padding_value
snake_case_ = sampling_rate
snake_case_ = do_normalize
snake_case_ = num_mel_bins
snake_case_ = hop_length
snake_case_ = win_length
snake_case_ = win_function
snake_case_ = fmin
snake_case_ = fmax
snake_case_ = mel_floor
snake_case_ = return_attention_mask
def snake_case__( self : Union[str, Any] ) ->str:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def snake_case__( self : Dict , _UpperCamelCase : str=False , _UpperCamelCase : Any=False ) ->Tuple:
def _flatten(_UpperCamelCase : int ):
return list(itertools.chain(*_UpperCamelCase ) )
if equal_length:
snake_case_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ = [np.asarray(_UpperCamelCase ) for x in speech_inputs]
return speech_inputs
def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]=False ) ->Union[str, Any]:
if equal_length:
snake_case_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ = [np.asarray(_UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaFeatureExtractor
def snake_case__( self : List[Any] ) ->int:
snake_case_ = SpeechTaFeatureExtractionTester(self )
def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->Union[str, Any]:
self.assertTrue(np.all(np.mean(_UpperCamelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_UpperCamelCase , axis=0 ) - 1 ) < 1e-3 ) )
def snake_case__( self : List[str] ) ->List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
snake_case_ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case_ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test batched
snake_case_ = feat_extract(_UpperCamelCase , return_tensors='''np''' ).input_values
snake_case_ = feat_extract(_UpperCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ = [None, 1_6_0_0, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = feat_extract(_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def snake_case__( self : Any ) ->Tuple:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = range(8_0_0 , 1_4_0_0 , 2_0_0 )
snake_case_ = [floats_list((1, x) )[0] for x in lengths]
snake_case_ = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ = [None, 1_6_0_0, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = feat_extract(_UpperCamelCase , max_length=_UpperCamelCase , padding=_UpperCamelCase )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def snake_case__( self : Optional[int] ) ->Any:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def snake_case__( self : Dict ) ->str:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = np.random.rand(1_0_0 ).astype(np.floataa )
snake_case_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def snake_case__( self : List[Any] ) ->List[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case_ = feature_extractor(audio_target=_UpperCamelCase , padding=_UpperCamelCase , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
snake_case_ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case_ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test batched
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case_ = np.asarray(_UpperCamelCase )
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Any ) ->Tuple:
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCamelCase ) == len(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , processed_features[input_name] ) ) )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(_UpperCamelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
snake_case_ = feat_extract.pad(_UpperCamelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case__( self : Optional[Any] ) ->str:
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCamelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = [len(_UpperCamelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(_UpperCamelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCamelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = [len(_UpperCamelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = min(_UpperCamelCase )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(
_UpperCamelCase , padding='''max_length''' , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self : List[Any] , num_samples : Union[str, Any] ) ->Any:
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def snake_case__( self : List[Any] ) ->List[Any]:
# fmt: off
snake_case_ = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
snake_case_ = self._load_datasamples(1 )
snake_case_ = SpeechTaFeatureExtractor()
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _UpperCamelCase , atol=1e-6 ) )
def snake_case__( self : Optional[int] ) ->List[str]:
# fmt: off
snake_case_ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
snake_case_ = self._load_datasamples(1 )
snake_case_ = SpeechTaFeatureExtractor()
snake_case_ = feature_extractor(audio_target=_UpperCamelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _UpperCamelCase , atol=1e-4 ) ) | 39 |
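# --- Hedged editor's sketch of the normalization property asserted by
# ``_check_zero_mean_unit_variance`` above: after zero-mean unit-variance
# scaling, the mean is ~0 and the variance is ~1. ---
import numpy as np

x = np.random.RandomState(0 ).rand(400 ).astype(np.float32 )
normalized = (x - x.mean()) / np.sqrt(x.var() + 1e-7 )
assert abs(normalized.mean() ) < 1e-3
assert abs(normalized.var() - 1 ) < 1e-3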
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "informer"
SCREAMING_SNAKE_CASE : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Dict , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "student_t" , _UpperCamelCase : str = "nll" , _UpperCamelCase : int = 1 , _UpperCamelCase : List[int] = None , _UpperCamelCase : Optional[Union[str, bool]] = "mean" , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : int = 6_4 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : bool = True , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.05 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 1_0_0 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : Dict=True , _UpperCamelCase : str = "prob" , _UpperCamelCase : int = 5 , _UpperCamelCase : bool = True , **_UpperCamelCase : Optional[Any] , ) ->Optional[int]:
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Informer
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def snake_case__( self : Optional[Any] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 39 | 1 |
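# --- Hedged editor's illustration of the ``embedding_dimension`` default set in
# the config above: min(50, (cardinality + 1) // 2) per static categorical feature. ---
cardinalities = [3, 10, 500]
assert [min(50 , (cat + 1) // 2 ) for cat in cardinalities] == [2, 5, 50]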
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_UpperCamelCase ).to(_UpperCamelCase )
snake_case_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
snake_case_ = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
snake_case_ = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
snake_case_ = model(input_ids.to(_UpperCamelCase ) , labels=labels.to(_UpperCamelCase ) ).loss
snake_case_ = -(labels.shape[-1] * loss.item())
snake_case_ = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
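# The score above rescales the model loss: `model(...).loss` is the mean cross-entropy per
# label token, so multiplying by the number of label tokens and negating recovers the total
# sequence log-likelihood. A sketch with illustrative numbers (the token count is assumed):
#   num_label_tokens = 4                     # labels.shape[-1]
#   mean_loss = 21.228175                    # model(...).loss.item()
#   score = -(num_label_tokens * mean_loss)  # = -84.9127, the EXPECTED_SCORE form above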
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Apparent power is the product of the two phasors
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
    doctest.testmod()
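# Illustrative values for the function above (whose signature is a reconstruction of the
# obfuscated original). Multiplying the two phasors multiplies the magnitudes and adds the
# angles, so |S| = V * I and angle(S) = voltage_angle + current_angle. Note that textbook
# complex power is usually S = V * conj(I); this snippet multiplies the phasors directly.
assert apparent_power(100, 5, 0, 0) == complex(500, 0)
assert cmath.isclose(apparent_power(100, 5, 30, -30), complex(500, 0), abs_tol=1e-9)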
from abc import ABC, abstractmethod
from typing import List, Optional
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : int ) ->Optional[Any]:
# test for the above condition
self.test()
def snake_case__( self : int ) ->str:
snake_case_ = 0
snake_case_ = False
while not completed:
if counter == 1:
self.reset()
snake_case_ = self.advance()
if not self.does_advance(_UpperCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
snake_case_, snake_case_, snake_case_ = self.update(_UpperCamelCase )
counter += 1
if counter > 1_0_0_0_0:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def snake_case__( self : List[Any] ) ->Union[str, Any]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case__( self : int , _UpperCamelCase : int ) ->List[str]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case__( self : Union[str, Any] , _UpperCamelCase : int ) ->int:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case__( self : int ) ->str:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case__( self : int ) ->str:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case__( self : List[str] , _UpperCamelCase : List[Any]=False ) ->List[Any]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : List[int] ) ->Dict:
super(_UpperCamelCase , self ).__init__()
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or len(_UpperCamelCase ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(_UpperCamelCase , _UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
snake_case_ = token_ids
snake_case_ = len(self.token_ids )
snake_case_ = -1 # the index of the currently fulfilled step
snake_case_ = False
def snake_case__( self : Dict ) ->Dict:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def snake_case__( self : Union[str, Any] , _UpperCamelCase : int ) ->Optional[Any]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def snake_case__( self : Union[str, Any] , _UpperCamelCase : int ) ->int:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
snake_case_ = False
snake_case_ = False
snake_case_ = False
if self.does_advance(_UpperCamelCase ):
self.fulfilled_idx += 1
snake_case_ = True
if self.fulfilled_idx == (self.seqlen - 1):
snake_case_ = True
snake_case_ = completed
else:
# failed to make progress.
snake_case_ = True
self.reset()
return stepped, completed, reset
def snake_case__( self : Any ) ->Union[str, Any]:
snake_case_ = False
snake_case_ = 0
def snake_case__( self : Union[str, Any] ) ->int:
return self.seqlen - (self.fulfilled_idx + 1)
def snake_case__( self : str , _UpperCamelCase : Union[str, Any]=False ) ->int:
snake_case_ = PhrasalConstraint(self.token_ids )
if stateful:
snake_case_ = self.seqlen
snake_case_ = self.fulfilled_idx
snake_case_ = self.completed
return new_constraint
class snake_case_ :
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : List[List[int]] , _UpperCamelCase : List[Any]=True ) ->str:
snake_case_ = max([len(_UpperCamelCase ) for one in nested_token_ids] )
snake_case_ = {}
for token_ids in nested_token_ids:
snake_case_ = root
for tidx, token_id in enumerate(_UpperCamelCase ):
if token_id not in level:
snake_case_ = {}
snake_case_ = level[token_id]
if no_subsets and self.has_subsets(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f''' {nested_token_ids}.''' )
snake_case_ = root
def snake_case__( self : Any , _UpperCamelCase : List[Any] ) ->Optional[Any]:
snake_case_ = self.trie
for current_token in current_seq:
snake_case_ = start[current_token]
snake_case_ = list(start.keys() )
return next_tokens
def snake_case__( self : Optional[int] , _UpperCamelCase : int ) ->Optional[int]:
snake_case_ = self.next_tokens(_UpperCamelCase )
return len(_UpperCamelCase ) == 0
def snake_case__( self : List[Any] , _UpperCamelCase : List[Any] ) ->Dict:
snake_case_ = list(root.values() )
if len(_UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_UpperCamelCase ) for nn in next_nodes] )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] ) ->int:
snake_case_ = self.count_leaves(_UpperCamelCase )
return len(_UpperCamelCase ) != leaf_count
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : List[List[int]] ) ->Any:
super(_UpperCamelCase , self ).__init__()
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or len(_UpperCamelCase ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(_UpperCamelCase , _UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(_UpperCamelCase , _UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
snake_case_ = DisjunctiveTrie(_UpperCamelCase )
snake_case_ = nested_token_ids
snake_case_ = self.trie.max_height
snake_case_ = []
snake_case_ = False
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.trie.next_tokens(self.current_seq )
if len(_UpperCamelCase ) == 0:
return None
else:
return token_list
def snake_case__( self : Dict , _UpperCamelCase : int ) ->Dict:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
snake_case_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def snake_case__( self : Tuple , _UpperCamelCase : int ) ->Optional[Any]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
snake_case_ = False
snake_case_ = False
snake_case_ = False
if self.does_advance(_UpperCamelCase ):
self.current_seq.append(_UpperCamelCase )
snake_case_ = True
else:
snake_case_ = True
self.reset()
snake_case_ = self.trie.reached_leaf(self.current_seq )
snake_case_ = completed
return stepped, completed, reset
def snake_case__( self : List[Any] ) ->str:
snake_case_ = False
snake_case_ = []
def snake_case__( self : Tuple ) ->Dict:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : List[Any]=False ) ->Optional[int]:
snake_case_ = DisjunctiveConstraint(self.token_ids )
if stateful:
snake_case_ = self.seqlen
snake_case_ = self.current_seq
snake_case_ = self.completed
return new_constraint
class snake_case_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : List[Constraint] ) ->str:
snake_case_ = constraints
# max # of steps required to fulfill a given constraint
snake_case_ = max([c.seqlen for c in constraints] )
snake_case_ = len(_UpperCamelCase )
snake_case_ = False
self.init_state()
def snake_case__( self : Tuple ) ->Dict:
snake_case_ = []
snake_case_ = None
snake_case_ = [constraint.copy(stateful=_UpperCamelCase ) for constraint in self.constraints]
def snake_case__( self : Tuple ) ->int:
snake_case_ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def snake_case__( self : Optional[Any] ) ->Optional[Any]:
snake_case_ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
snake_case_ = constraint.advance()
if isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.append(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.extend(_UpperCamelCase )
else:
snake_case_ = self.inprogress_constraint.advance()
if isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.append(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.extend(_UpperCamelCase )
if len(_UpperCamelCase ) == 0:
return None
else:
return token_list
def snake_case__( self : Dict , _UpperCamelCase : Optional[List[int]] ) ->List[Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
snake_case_, snake_case_ = self.add(_UpperCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def snake_case__( self : Optional[int] , _UpperCamelCase : int ) ->List[Any]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
snake_case_, snake_case_ = False, False
if self.completed:
snake_case_ = True
snake_case_ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
snake_case_, snake_case_, snake_case_ = self.inprogress_constraint.update(_UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCamelCase ) )
snake_case_ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
snake_case_ = None
if len(self.pending_constraints ) == 0:
# we're done!
snake_case_ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_UpperCamelCase ):
snake_case_, snake_case_, snake_case_ = pending_constraint.update(_UpperCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_UpperCamelCase )
snake_case_ = None
if not complete and stepped:
snake_case_ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
snake_case_ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
snake_case_ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def snake_case__( self : int , _UpperCamelCase : List[str]=True ) ->Optional[Any]:
        snake_case_ = ConstraintListState(self.constraints )  # we never mutate the `self.constraints`
        # objects during this process, so the copy starts from their initialization state.
if stateful:
snake_case_ = [
constraint.copy(stateful=_UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
snake_case_ = self.inprogress_constraint.copy(stateful=_UpperCamelCase )
snake_case_ = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
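# A self-contained sketch of the trie bookkeeping that DisjunctiveTrie implements above; the
# helper names are illustrative and this is a re-implementation for clarity, not the class itself.
def build_trie(nested_token_ids):
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root


def trie_next_tokens(root, current_seq):
    level = root
    for token_id in current_seq:
        level = level[token_id]
    return list(level.keys())


trie = build_trie([[1, 2, 3], [1, 2, 4], [1, 5]])
assert sorted(trie_next_tokens(trie, [1])) == [2, 5]  # two branches after the shared prefix
assert sorted(trie_next_tokens(trie, [1, 2])) == [3, 4]
assert trie_next_tokens(trie, [1, 2, 3]) == []  # empty dict == reached a leaf, phrase complete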
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=1_3 , _UpperCamelCase : str=7 , _UpperCamelCase : int=True , _UpperCamelCase : Dict=True , _UpperCamelCase : int=False , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=9_9 , _UpperCamelCase : str=3_2 , _UpperCamelCase : str=5 , _UpperCamelCase : str=4 , _UpperCamelCase : int=3_7 , _UpperCamelCase : int="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : str=5_1_2 , _UpperCamelCase : Optional[int]=1_6 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : Any=0.02 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : str=None , ) ->Dict:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def snake_case__( self : str ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] ) ->Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
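    # The bound checked above encodes the GPT-2-style scaled initialization: residual
    # projection weights ("c_proj") are expected to be drawn with
    # std = initializer_range / sqrt(2 * num_hidden_layers), which keeps the variance of
    # the residual stream roughly constant as depth grows.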
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = BioGptModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = '''left'''
        # Define PAD Token = EOS Token
snake_case_ = tokenizer.eos_token
snake_case_ = model.config.eos_token_id
# use different length sentences to test batching
snake_case_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
snake_case_ = inputs['''input_ids'''].to(_UpperCamelCase )
snake_case_ = model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
snake_case_ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase )
snake_case_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
snake_case_ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
        self.assertEqual(_UpperCamelCase , _UpperCamelCase )
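# Why the batching test above switches tokenizer.padding_side to "left" for this decoder-only
# model (token layout shown schematically; the actual ids depend on the BioGPT vocabulary):
#
#   right padding: [Today] [,] [I] [PAD] [PAD] [PAD]  -> generation continues after PAD (wrong)
#   left padding:  [PAD] [PAD] [PAD] [Today] [,] [I]  -> generation continues after "I" (correct)
#
# The attention mask zeroes out the PAD positions in both cases, but generation always appends
# to the right end of the sequence, so the right end must hold real tokens.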
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
    from .midi_utils import MidiProcessor
def method_a(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) ≈ h/2 * (f(x_1) + 2*f(x_2) + ... + 2*f(x_(n-1)) + f(x_n))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(F'''y = {y}''' )
if __name__ == "__main__":
    main()
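# Quick sanity check: the exact integral of x**2 over [0, 1] is 1/3, and 10 trapezoids
# overestimate slightly for this convex integrand (estimate = 0.335). Note that make_points
# stops at `x < (b - h)` and only emits the last interior point thanks to floating-point
# drift; an integer-counter loop would be more robust.
assert abs(method_a([0.0, 1.0], 10.0) - 1 / 3) < 2e-3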
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
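# Illustrative calls showing how ragged per-example lists are right-padded (or truncated) to a
# fixed length by the reconstructed helper above; the values are made up for demonstration:
assert padding_tensor([[0, 3, 3], [5]], -1, "right", 4) == [[0, 3, 3, -1], [5, -1, -1, -1]]
assert padding_tensor([[(0, 2), (2, 5)], [(0, 1)]], (-1, -1), "right", 3) == [
    [[0, 2], [2, 5], [-1, -1]],
    [[0, 1], [-1, -1], [-1, -1]],
]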
def is_punctuation(char):
    cp = ord(char)
    # ASCII punctuation blocks: !-/ (33-47), :-@ (58-64), [-` (91-96) and {-~ (123-126)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    # Fall back to the Unicode category for non-ASCII punctuation
    cat = unicodedata.category(char)
    if cat.startswith('''P''' ):
        return True
    return False
@dataclass
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = -100
SCREAMING_SNAKE_CASE : str = "pt"
def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->str:
import torch
snake_case_ = '''label''' if '''label''' in features[0].keys() else '''labels'''
snake_case_ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
snake_case_ = self.tokenizer.pad(
_UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
snake_case_ = torch.tensor(batch['''entity_ids'''] ).shape[1]
snake_case_ = self.tokenizer.padding_side
if padding_side == "right":
snake_case_ = [
list(_UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(_UpperCamelCase )) for label in labels
]
else:
snake_case_ = [
[self.label_pad_token_id] * (sequence_length - len(_UpperCamelCase )) + list(_UpperCamelCase ) for label in labels
]
snake_case_ = [feature['''ner_tags'''] for feature in features]
snake_case_ = padding_tensor(_UpperCamelCase , -1 , _UpperCamelCase , _UpperCamelCase )
snake_case_ = [feature['''original_entity_spans'''] for feature in features]
snake_case_ = padding_tensor(_UpperCamelCase , (-1, -1) , _UpperCamelCase , _UpperCamelCase )
        snake_case_ = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / '''model_card_template.md'''
lowerCAmelCase_ = uuid4().hex
lowerCAmelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None ):
snake_case_ = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + user_agent
return ua
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if token is None:
snake_case_ = HfFolder.get_token()
if organization is None:
snake_case_ = whoami(SCREAMING_SNAKE_CASE__ )['''name''']
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(SCREAMING_SNAKE_CASE__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case_ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , '''hub_token''' ) else None
snake_case_ = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
snake_case_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''gradient_accumulation_steps''' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta1''' ) else None , adam_beta2=args.adam_beta2 if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
snake_case_ = os.path.join(args.output_dir , '''README.md''' )
model_card.save(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case_ = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() )
snake_case_ = re.search(R'''snapshots/([^/]+)/''' , SCREAMING_SNAKE_CASE__ )
if search is None:
return None
snake_case_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, '''diffusers''')
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if new_cache_dir is None:
snake_case_ = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case_ = old_diffusers_cache
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case_ = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ )
new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
try:
os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
lowerCAmelCase_ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_ = int(f.read())
except ValueError:
lowerCAmelCase_ = 0
if cache_version < 1:
lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if variant is not None:
snake_case_ = weights_name.split('''.''' )
snake_case_ = splits[:-1] + [variant] + splits[-1:]
snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ )
return weights_name
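# An illustrative trace of the `_add_variant` helper above, which splices the variant tag in
# front of the file extension when resolving checkpoint names:
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("model.safetensors", "non_ema")        -> "model.non_ema.safetensors"
#   _add_variant("model.bin", None)                     -> "model.bin"  (no variant, unchanged)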
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , *,
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ):
snake_case_ = str(SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
# Load from a PyTorch checkpoint
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
            F'''containing a file named {weights_name}''' )
def hexagonal_numbers(length: int) -> list[int]:
    # Check the type before comparing, so a non-integer argument also raises ValueError
    if not isinstance(length, int) or length <= 0:
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
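# A short check of the closed form above: the n-th hexagonal number h(n) = n * (2n - 1)
# equals the sum 1 + 5 + 9 + ... of the first n terms of the arithmetic progression with
# common difference 4 (this listing starts at n = 0, hence the leading 0).
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]
assert all(h == sum(range(1, 4 * n, 4)) for n, h in enumerate(hexagonal_numbers(8)))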
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
        return output
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class snake_case_ :
'''simple docstring'''
@staticmethod
def snake_case__( *_UpperCamelCase : List[str] , **_UpperCamelCase : Dict ) ->List[str]:
pass
def hashimage(image):
    # Fingerprint the raw pixel bytes so test outputs can be compared by digest
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def snake_case__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) ->int:
snake_case_ = DepthEstimationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case__( self : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) ->Optional[int]:
snake_case_ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _UpperCamelCase )
import datasets
snake_case_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
snake_case_ = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _UpperCamelCase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def snake_case__( self : Optional[Any] ) ->Union[str, Any]:
pass
@slow
@require_torch
def snake_case__( self : Any ) ->List[Any]:
snake_case_ = '''Intel/dpt-large'''
snake_case_ = pipeline('''depth-estimation''' , model=_UpperCamelCase )
snake_case_ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
snake_case_ = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def snake_case__( self : Optional[int] ) ->str:
        # It is highly irregular to have no small tests.
        self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
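# A minimal inference sketch for the pipeline exercised above (added for
# illustration; kept commented out because it downloads the Intel/dpt-large
# weights). The output keys match the assertions in these tests: a
# "predicted_depth" tensor and a "depth" PIL image.
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# outputs["depth"].save("depth_map.png")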
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = '''pytorch_model.bin'''
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "A csv or a json file containing the validation data."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "The name of the task to train on."} , )
SCREAMING_SNAKE_CASE : Optional[List[str]] = dataclasses.field(
default=__A , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Maximum number of self-training iterations to run."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=__A , metadata={"help": "Random seed for initialization."} , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
snake_case_ = dataset.filter(lambda SCREAMING_SNAKE_CASE__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
snake_case_ = int(eval_result * len(SCREAMING_SNAKE_CASE__ ) )
print(SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.sort('''probability''' , reverse=SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
snake_case_ = dataset.remove_columns(['''label''', '''probability'''] )
snake_case_ = dataset.rename_column('''prediction''' , '''label''' )
snake_case_ = dataset.map(lambda SCREAMING_SNAKE_CASE__ : {"label": idalabel[example["label"]]} )
snake_case_ = dataset.shuffle(seed=args.seed )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ )
else:
dataset.to_json(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
snake_case_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case_ = STModelArguments(model_name_or_path=SCREAMING_SNAKE_CASE__ )
snake_case_ = STDataArguments(train_file=SCREAMING_SNAKE_CASE__ , infer_file=SCREAMING_SNAKE_CASE__ )
snake_case_ = STTrainingArguments(output_dir=SCREAMING_SNAKE_CASE__ )
snake_case_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(SCREAMING_SNAKE_CASE__ ).items():
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for key, value in kwargs.items():
if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Sanity checks
snake_case_ = {}
snake_case_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case_ = args.train_file
snake_case_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case_ = args.eval_file
for key in data_files:
snake_case_ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
snake_case_ = extension
else:
            assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
snake_case_ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
snake_case_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = None
snake_case_ = None
snake_case_ = 0
snake_case_ = False
# Show the progress bar
snake_case_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case_ = data_dir_format(SCREAMING_SNAKE_CASE__ )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-1''' )
snake_case_ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
arguments_dict.update({key: value} )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-2''' )
# Update arguments_dict
snake_case_ = model_path
snake_case_ = data_files['''train''']
snake_case_ = current_output_dir
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = iteration
snake_case_ = data_dir_format(iteration + 1 )
snake_case_ = AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' ) )
snake_case_ = config.idalabel
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-checkpoint.json''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = float(json.load(SCREAMING_SNAKE_CASE__ )[args.eval_metric] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Loading the dataset from local csv or json files.
snake_case_ = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
snake_case_ = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case_ = eval_result
if best_iteration is None:
snake_case_ = new_iteration
snake_case_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case_ = new_iteration
snake_case_ = new_eval_result
snake_case_ = 0
else:
if new_eval_result == best_eval_result:
snake_case_ = new_iteration
snake_case_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , SCREAMING_SNAKE_CASE__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , ) | 39 | 1 |
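# Hypothetical invocation sketch for the self-training loop defined above (added
# for illustration: the entry point's real name is mangled in this file, so
# `selftrain` is an assumption, and the keyword names mirror the STModelArguments,
# STDataArguments and STTrainingArguments fields used earlier):
# selftrain(
#     model_name_or_path="bert-base-uncased",
#     train_file="train.csv",
#     infer_file="unlabeled.csv",
#     output_dir="self_training_output",
#     eval_file="eval.csv",
#     evaluation_strategy="epoch",
#     max_selftrain_iterations=10,
# )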
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowerCAmelCase_ = False
try:
lowerCAmelCase_ = _is_package_available('''google.colab''')
except ModuleNotFoundError:
pass
@input.register
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : str = None , _UpperCamelCase : list = [] ) ->int:
snake_case_ = 0
snake_case_ = choices
snake_case_ = prompt
if sys.platform == "win32":
snake_case_ = '''*'''
else:
snake_case_ = '''➔ '''
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : str = "" ) ->Dict:
if sys.platform != "win32":
writeColor(self.choices[index] , 3_2 , _UpperCamelCase )
else:
forceWrite(self.choices[index] , _UpperCamelCase )
def snake_case__( self : Optional[int] , _UpperCamelCase : int ) ->List[Any]:
if index == self.position:
forceWrite(f''' {self.arrow_char} ''' )
self.write_choice(_UpperCamelCase )
else:
forceWrite(f''' {self.choices[index]}''' )
reset_cursor()
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Direction , _UpperCamelCase : int = 1 ) ->List[Any]:
snake_case_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_UpperCamelCase )
move_cursor(_UpperCamelCase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def snake_case__( self : Any ) ->List[str]:
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def snake_case__( self : Tuple ) ->str:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def snake_case__( self : Any ) ->int:
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def snake_case__( self : int ) ->Union[str, Any]:
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_UpperCamelCase )] for number in range(1_0 )] )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = int(chr(self.current_selection ) )
snake_case_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _UpperCamelCase )
else:
return
else:
return
def snake_case__( self : Tuple , _UpperCamelCase : int = 0 ) ->List[str]:
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
snake_case_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_UpperCamelCase )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
snake_case_ = int(builtins.input() )
except ValueError:
snake_case_ = default_choice
else:
snake_case_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(_UpperCamelCase , '''\n''' )
return choice | 39 |
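# A minimal usage sketch (added for illustration; the class name is mangled here,
# but it mirrors accelerate's interactive BulletMenu). run(default_choice=...)
# blocks until the user confirms a selection and returns the chosen index:
# menu = BulletMenu("In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"])
# selected_index = menu.run(default_choice=0)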
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AltDiffusionPipeline
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__( self : Dict ) ->int:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
snake_case_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
snake_case_ = CLIPTextModel(_UpperCamelCase )
snake_case_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
snake_case_ = 7_7
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=0 ) ->Any:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Dict ) ->List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : List[str] ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__( self : Dict ) ->Any:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''A photo of an astronaut'''
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : int ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[str] ) ->Tuple:
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''numpy''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 39 | 1 |
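# A minimal end-to-end sketch for the pipeline under test (added for illustration;
# kept commented out because it downloads the BAAI/AltDiffusion weights):
# pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
# pipe = pipe.to("cuda")
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]
# image.save("squirrel.png")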
from math import factorial
lowerCAmelCase_ = {str(digit): factorial(digit) for digit in range(10)}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Convert the number to a string to iterate over its digits, summing each digit's factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(SCREAMING_SNAKE_CASE__ ) )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 60 , SCREAMING_SNAKE_CASE__ = 1000000 ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
snake_case_ = 0
# the cached sizes of the previous chains
snake_case_ = {}
for start_chain_element in range(1 , SCREAMING_SNAKE_CASE__ ):
# The temporary set will contain the elements of the chain
snake_case_ = set()
snake_case_ = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
snake_case_ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(SCREAMING_SNAKE_CASE__ )
chain_set_length += 1
snake_case_ = digit_factorial_sum(SCREAMING_SNAKE_CASE__ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
snake_case_ = chain_set_length
        # If the chain contains exactly the desired number of elements, increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""") | 39 |
from math import factorial
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    # If either condition is true, the function is being asked
    # to calculate the factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(SCREAMING_SNAKE_CASE__ ) // (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k ))
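# Self-contained check of the same formula (added for illustration, since the
# defining name above is mangled): C(5, 2) = 5! / (2! * 3!) = 10.
assert factorial(5) // (factorial(2) * factorial(3)) == 10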
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
) | 39 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase_ = logging.get_logger(__name__)
@dataclass
class snake_case_ :
'''simple docstring'''
def __init__( self : Any , _UpperCamelCase : Any=False , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Optional[int]=6.0 , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]="fp4" , _UpperCamelCase : List[str]=False , **_UpperCamelCase : int , ) ->Optional[Any]:
snake_case_ = load_in_abit
snake_case_ = load_in_abit
snake_case_ = llm_inta_threshold
snake_case_ = llm_inta_skip_modules
snake_case_ = llm_inta_enable_fpaa_cpu_offload
snake_case_ = llm_inta_has_fpaa_weight
snake_case_ = bnb_abit_quant_type
snake_case_ = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
snake_case_ = torch.floataa
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = getattr(_UpperCamelCase , _UpperCamelCase )
elif isinstance(_UpperCamelCase , torch.dtype ):
snake_case_ = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def snake_case__( self : Tuple ) ->List[Any]:
if not isinstance(self.llm_inta_threshold , _UpperCamelCase ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _UpperCamelCase ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _UpperCamelCase ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , _UpperCamelCase ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , _UpperCamelCase ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , _UpperCamelCase ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def snake_case__( self : Union[str, Any] ) ->Dict:
return self.load_in_abit or self.load_in_abit
def snake_case__( self : Optional[int] ) ->Union[str, Any]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def snake_case__( cls : Any , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , **_UpperCamelCase : Union[str, Any] ) ->Optional[Any]:
snake_case_ = cls(**_UpperCamelCase )
snake_case_ = []
for key, value in kwargs.items():
if hasattr(_UpperCamelCase , _UpperCamelCase ):
setattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
to_remove.append(_UpperCamelCase )
for key in to_remove:
kwargs.pop(_UpperCamelCase , _UpperCamelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def snake_case__( self : List[str] , _UpperCamelCase : Union[str, os.PathLike] ) ->Optional[int]:
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
snake_case_ = self.to_dict()
snake_case_ = json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + '''\n'''
writer.write(_UpperCamelCase )
def snake_case__( self : Any ) ->Dict[str, Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self : Optional[Any] ) ->List[str]:
return f'''{self.__class__.__name__} {self.to_json_string()}'''
def snake_case__( self : Any , _UpperCamelCase : bool = True ) ->str:
if use_diff is True:
snake_case_ = self.to_diff_dict()
else:
snake_case_ = self.to_dict()
return json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + "\n"
def snake_case__( self : List[str] ) ->Dict[str, Any]:
snake_case_ = self.to_dict()
# get the default config dict
snake_case_ = BitsAndBytesConfig().to_dict()
snake_case_ = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
snake_case_ = value
return serializable_config_dict | 39 |
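# For reference (added for illustration): upstream this class is
# transformers.BitsAndBytesConfig, and a typical 4-bit setup looks like the
# commented sketch below (digit-bearing attribute names are mangled in this file):
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.bfloat16,
#     bnb_4bit_use_double_quant=True,
# )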
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : bool
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 42
SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : Optional[bool] = None
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "titi"
SCREAMING_SNAKE_CASE : Any = "toto"
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = "titi"
SCREAMING_SNAKE_CASE : Optional[Any] = "toto"
SCREAMING_SNAKE_CASE : Any = 42
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def snake_case__( self : Tuple ) ->List[str]:
snake_case_ = BasicEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = field()
SCREAMING_SNAKE_CASE : str = field()
SCREAMING_SNAKE_CASE : BasicEnum = field()
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : "BasicEnum" = field()
SCREAMING_SNAKE_CASE : "Optional[bool]" = None
SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int | None = None
SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : str | None = None
SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def snake_case__( self : str ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
            snake_case_ = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase ) | 39 | 1 |
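# A minimal standalone demonstration of the parsing behaviour exercised above.
# `DemoArgs` is a hypothetical dataclass added for illustration; HfArgumentParser
# and `dataclass` are already imported at the top of this file.
@dataclass
class DemoArgs:
    foo: int = 1
    flag: bool = False


(demo_args,) = HfArgumentParser(DemoArgs).parse_args_into_dataclasses(["--foo", "2", "--flag", "true"])
assert demo_args.foo == 2 and demo_args.flag is True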
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "deta"
SCREAMING_SNAKE_CASE : Optional[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Optional[int] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[Any]=9_0_0 , _UpperCamelCase : Union[str, Any]=2_0_4_8 , _UpperCamelCase : Tuple=6 , _UpperCamelCase : List[str]=2_0_4_8 , _UpperCamelCase : Tuple=8 , _UpperCamelCase : Optional[int]=6 , _UpperCamelCase : int=1_0_2_4 , _UpperCamelCase : int=8 , _UpperCamelCase : str=0.0 , _UpperCamelCase : str=True , _UpperCamelCase : Tuple="relu" , _UpperCamelCase : int=2_5_6 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : str=0.0 , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Any=0.02 , _UpperCamelCase : Dict=1.0 , _UpperCamelCase : Tuple=True , _UpperCamelCase : Dict=False , _UpperCamelCase : Any="sine" , _UpperCamelCase : Any=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Union[str, Any]=4 , _UpperCamelCase : Any=True , _UpperCamelCase : List[str]=3_0_0 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : int=True , _UpperCamelCase : Dict=1 , _UpperCamelCase : Any=5 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : Any=1 , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : Optional[int]=5 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : int=0.1 , _UpperCamelCase : List[Any]=0.25 , **_UpperCamelCase : str , ) ->Tuple:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
snake_case_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config.pop('''model_type''' )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(_UpperCamelCase )
snake_case_ = backbone_config
snake_case_ = num_queries
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = init_xavier_std
snake_case_ = encoder_layerdrop
snake_case_ = auxiliary_loss
snake_case_ = position_embedding_type
# deformable attributes
snake_case_ = num_feature_levels
snake_case_ = encoder_n_points
snake_case_ = decoder_n_points
snake_case_ = two_stage
snake_case_ = two_stage_num_proposals
snake_case_ = with_box_refine
snake_case_ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
snake_case_ = class_cost
snake_case_ = bbox_cost
snake_case_ = giou_cost
# Loss coefficients
snake_case_ = mask_loss_coefficient
snake_case_ = dice_loss_coefficient
snake_case_ = bbox_loss_coefficient
snake_case_ = giou_loss_coefficient
snake_case_ = eos_coefficient
snake_case_ = focal_alpha
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def snake_case__( self : Tuple ) ->int:
return self.encoder_attention_heads
@property
def snake_case__( self : Optional[Any] ) ->int:
return self.d_model
def snake_case__( self : Union[str, Any] ) ->Tuple:
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output | 39 |
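# A minimal construction sketch (added for illustration; upstream this class is
# transformers.DetaConfig, and its to_dict serializes the nested backbone config
# as shown above):
# config = DetaConfig(two_stage=True, with_box_refine=True)
# config_dict = config.to_dict()  # contains "backbone_config" as a plain dict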
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Dict , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 39 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase_ = open # noqa: we just need to have a builtin inside this module to test it properly | 39 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "rwkv"
SCREAMING_SNAKE_CASE : Any = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , _UpperCamelCase : Any=5_0_2_7_7 , _UpperCamelCase : Optional[int]=1_0_2_4 , _UpperCamelCase : Optional[int]=4_0_9_6 , _UpperCamelCase : str=3_2 , _UpperCamelCase : Tuple=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=1e-5 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=0 , _UpperCamelCase : int=6 , _UpperCamelCase : Dict=False , _UpperCamelCase : Optional[int]=True , **_UpperCamelCase : int , ) ->List[str]:
snake_case_ = vocab_size
snake_case_ = context_length
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case_ = layer_norm_epsilon
snake_case_ = rescale_every
snake_case_ = use_cache
snake_case_ = bos_token_id
snake_case_ = eos_token_id
super().__init__(
            tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
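# Minimal sketch (added for illustration) of the `attribute_map` aliasing used
# above: in transformers, PretrainedConfig resolves aliased attribute names to
# the stored ones, so `max_position_embeddings` reads `context_length`. The
# tiny class below reimplements just that lookup to show the mechanism.
class _AliasDemo:
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__(self, context_length):
        self.context_length = context_length
    def __getattr__(self, name):  # only called when normal lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)
assert _AliasDemo(context_length=2048).max_position_embeddings == 2048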
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array ) < k or k < 0:
        raise ValueError('''Invalid Input''' )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
    print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
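# Cross-check sketch (added for illustration): the O(n) sliding-window answer
# above should match a naive O(n*k) scan over every length-k window.
def max_sum_naive(array: list[int], k: int) -> int:
    return max(sum(array[i : i + k]) for i in range(len(array) - k + 1))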
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
class ExtractManager:
    def __init__( self , cache_dir : Optional[str] = None ):
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path( self , path : str ) ->str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
    def _do_extract( self , output_path : str , force_extract : bool ) ->bool:
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )
    def extract( self , input_path : str , force_extract : bool = False ) ->str:
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor(ABC ):
    @classmethod
    @abstractmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) ->bool:
        ...
    @staticmethod
    @abstractmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        ...
class MagicNumberBaseExtractor(BaseExtractor , ABC ):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number( path : Union[Path, str] , magic_number_length : int ):
        with open(path , '''rb''' ) as f:
            return f.read(magic_number_length )
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) ->bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor ):
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) ->bool:
        return tarfile.is_tarfile(path )
    @staticmethod
    def safemembers( members , output_path ):
        def resolved(path : str ) -> str:
            return os.path.realpath(os.path.abspath(path ) )
        def badpath(path : str , base : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )
        def badlink(info , base : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )
        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x1F\x8B"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with gzip.open(input_path , '''rb''' ) as gzip_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) ->bool:
        if super().is_extractable(path , magic_number=magic_number ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
            with open(path , '''rb''' ) as fp:
                endrec = _EndRecData(fp )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir ) # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , '''r''' ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('''Please pip install rarfile''' )
        import rarfile
        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('''Please pip install zstandard''' )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path , '''rb''' ) as ifh, open(output_path , '''wb''' ) as ofh:
            dctx.copy_stream(ifh , ofh )
class Bzip2Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x42\x5A\x68"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with bz2.open(input_path , '''rb''' ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('''Please pip install py7zr''' )
        import py7zr
        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , '''r''' ) as archive:
            archive.extractall(output_path )
class Lz4Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x04\x22\x4D\x18"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(input_path , '''rb''' ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor, # <Added version="2.4.0"/>
        "lz4": Lz4Extractor, # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length( cls ):
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number( path : Union[Path, str] , magic_number_length : int ):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , return_extractor : bool = False ) ->bool:
        warnings.warn(
            '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
            '''Use \'infer_extractor_format\' instead.''' , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format( cls , path : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format
    @classmethod
    def extract( cls , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor : Optional[BaseExtractor] = "deprecated" , ) ->None:
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix('''.lock''' ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ): # passed as positional arg
                    warnings.warn(
                        '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
                        '''Use \'extractor_format\' instead.''' , category=FutureWarning , )
                    extractor = extractor if extractor != '''deprecated''' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
                    '''exception in 3.0.0.''' , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
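# Standalone sketch (added for illustration) of the magic-number detection
# idea used by the extractors above: read the first few bytes of a file and
# compare them against known signatures. The table covers only a handful of
# formats and is illustrative, not exhaustive.
_MAGIC = {
    b"\x1f\x8b": "gzip",
    b"PK\x03\x04": "zip",
    b"BZh": "bz2",
    b"\xfd7zXZ\x00": "xz",
    b"\x28\xb5\x2f\xfd": "zstd",
}
def sniff_format(path):
    longest = max(len(magic) for magic in _MAGIC)
    with open(path, "rb") as f:
        header = f.read(longest)
    for magic, fmt in _MAGIC.items():
        if header.startswith(magic):
            return fmt
    return None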
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
    )
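# Sanity-check sketch (added for illustration): Python 3.8+ ships math.comb,
# which should agree with the factorial-based helper above.
if __name__ == "__main__":
    import math
    assert combinations(52, 5) == math.comb(52, 5)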
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
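# Property-check sketch (added for illustration): bead sort should agree with
# Python's built-in sort on random non-negative input.
if __name__ == "__main__":
    import random
    data = [random.randint(0, 100) for _ in range(20)]
    assert bead_sort(list(data)) == sorted(data)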
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('''<n>''' , '''''' , x ) # remove pegasus newline char (keep the substituted text)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
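# Cross-check sketch (added for illustration): the string-level OR above must
# agree with Python's integer bitwise OR for any pair of non-negative ints.
if __name__ == "__main__":
    for x, y in [(25, 32), (0, 0), (7, 200)]:
        assert binary_or(x, y) == bin(x | y)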
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25l''' )
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25h''' )
        sys.stdout.flush()
@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
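# Usage sketch (added for illustration): the `hide` context manager above
# hides the terminal cursor for the duration of a block and restores it even
# if the block raises.
if __name__ == "__main__":
    import time
    with hide():
        time.sleep(0.1)  # cursor is hidden while this runs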
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class Object(object ):
    pass
class DiffusionUncond(nn.Module ):
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download(model_name ):
    url = MODELS_MAP[model_name]['''url''']
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name ):
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def rename(input_string , max_depth=13 ):
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args ):
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['''state_dict'''] )
    diffusion = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed ) ), F'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(diffusion , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , diff_sum )
    print('''Diff max''' , diff_max )
    assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
    print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    main(args)
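# Usage sketch (added for illustration, assuming diffusers is installed and
# the script above was run with --save): the converted checkpoint can be
# reloaded as a regular pipeline.
# from diffusers import DanceDiffusionPipeline
# pipe = DanceDiffusionPipeline.from_pretrained("path/to/checkpoint")
# audio = pipe(num_inference_steps=100).audios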
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name ):
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
return config
def rename_key(name ):
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
return name
def convert_focalnet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
# fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
print('''First values of logits:''' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
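# Illustrative sketch (added, not transformers code): the same lazy-import
# effect can be had with module-level __getattr__ (PEP 562) in a package
# __init__.py, importing a submodule only when one of its names is accessed.
# import importlib
#
# def __getattr__(name):
#     for module_name, names in _import_structure.items():
#         if name in names:
#             module = importlib.import_module("." + module_name, __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")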
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def snake_case__( self : Dict ) ->int:
snake_case_ = '''[PAD]'''
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->List[Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_UpperCamelCase ) , 1_0_1_2 )
def snake_case__( self : Tuple ) ->List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def snake_case__( self : Optional[int] ) ->Optional[Any]:
snake_case_ = XLMProphetNetTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
snake_case_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def snake_case__( self : int ) ->Dict:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def snake_case__( self : Optional[int] ) ->int:
snake_case_ = '''Hello World!'''
snake_case_ = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def snake_case__( self : str ) ->List[Any]:
# fmt: off
snake_case_ = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase ):
'''simple docstring'''
    def test_top_k_top_p_filtering( self ):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.int32 , ) # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.float32 , ) # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float('''inf''' )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('''inf''' ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
'''simple docstring'''
if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
    def test_generate_tf_function_export_fixed_input_length( self ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name='''input_ids''' ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name='''attention_mask''' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [1_0_2, 1_0_3]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'''serving_default''': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['''serving_default''']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
                    '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['''sequences''']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size( self ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='''input_ids''' ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='''attention_mask''' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [1_0_2, 1_0_3]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'''serving_default''': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['''serving_default''']
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    '''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
                    '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )['''sequences''']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer( self ):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=tmp_dir )
            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                def __init__( self ):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , '''spiece.model''' ) , '''rb''' ).read() )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
                def call( self , inputs , *args , **kwargs ):
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling( self ):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            '''do_sample''': True,
            '''num_beams''': 1,
            '''top_p''': 0.7,
            '''top_k''': 1_0,
            '''temperature''': 0.7,
        }
        expectation = 1_4
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        text = '''Hello, my dog is cute and'''
        tokens = tokenizer(text , return_tensors='''tf''' )
        model = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        eos_token_id = 6_3_8
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [6_3_8, 1_9_8]
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering( self ):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        article = '''Hugging Face is a technology company based in New York and Paris.'''
        input_ids = bart_tokenizer(article , return_tensors='''tf''' ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        output = bart_model.generate(input_ids ).numpy()
        class FakeBart(TFBartForConditionalGeneration ):
            def call( self , input_ids , foo=None , **kwargs ):
                return super().call(input_ids , **kwargs )
        bart_model = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        fake_output = bart_model.generate(input_ids , foo='''bar''' ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )
        class FakeEncoder(bart_model.model.encoder.__class__ ):
            def call( self , input_ids , **kwargs ):
                return super().call(input_ids , **kwargs )
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo='''bar''' )
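# Framework-agnostic sketch (added for illustration) of the top-k / top-p
# filtering exercised by the first test above: keep the k largest logits and
# the smallest prefix of the probability-sorted logits whose cumulative
# probability reaches top_p, setting everything else to -inf. It assumes a
# 1-D logits vector and omits min_tokens_to_keep, which the real
# tf_top_k_top_p_filtering also enforces.
import numpy as np
def top_k_top_p_filter(logits, top_k=0, top_p=1.0):
    logits = logits.copy()
    if top_k > 0:
        kth = np.sort(logits)[-top_k]  # value of the k-th largest logit
        logits[logits < kth] = -np.inf
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]  # indices sorted by descending logit
        probs = np.exp(logits[order] - logits[order].max())
        probs /= probs.sum()
        cum = np.cumsum(probs)
        cutoff = np.searchsorted(cum, top_p) + 1  # keep at least one token
        logits[order[cutoff:]] = -np.inf
    return logits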
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] ) ->Any:
        self.processor = DonutProcessor.from_pretrained(lowerCAmelCase_ )
def snake_case__( self : Dict ) ->str:
        expected_json = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
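        # Donut serializes this dict as XML-like tags: <s_key>value</s_key>, with list items joined by <sep/>.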
        sequence = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
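# Shared assertions for the reader tests below: a 4-row, 3-column dataset with the expected column names and dtypes.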
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ = JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case_ = features.copy() if features else default_expected_features
snake_case_ = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
snake_case_ = features.copy() if features else default_expected_features
snake_case_ = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
snake_case_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
snake_case_ = features.copy()
snake_case_ = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = tmp_path / '''cache'''
snake_case_ = JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case_ = JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = jsonl_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = [jsonl_path]
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case_ = JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
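# DatasetDict variant of the checks above, applied to each requested split.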
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=("train",) ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case_ = features.copy() if features else default_expected_features
snake_case_ = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = JsonDatasetReader({'''train''': jsonl_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if split:
snake_case_ = {split: jsonl_path}
else:
snake_case_ = '''train'''
snake_case_ = {'''train''': jsonl_path, '''test''': jsonl_path}
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case_ = JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json (buffer ):
    return json.load(buffer )
def load_json_lines (buffer ):
    return [json.loads(line ) for line in buffer]
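# Writer tests: round-trip a Dataset through JsonDatasetWriter into an in-memory buffer and re-parse it
# with the matching loader (json.load for a single document, json.loads per line for JSON Lines).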
class snake_case_ :
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : int ) ->str:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCamelCase , _UpperCamelCase , lines=_UpperCamelCase ).write()
buffer.seek(0 )
snake_case_ = load_json_function(_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert isinstance(exported_content[0] , _UpperCamelCase )
assert len(_UpperCamelCase ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__( self : int , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] ) ->List[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCamelCase , _UpperCamelCase , lines=_UpperCamelCase , orient=_UpperCamelCase ).write()
buffer.seek(0 )
snake_case_ = load_json(_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(_UpperCamelCase ) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int ) ->Tuple:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCamelCase , _UpperCamelCase , lines=_UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case_ = load_json_function(_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert isinstance(exported_content[0] , _UpperCamelCase )
assert len(_UpperCamelCase ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__( self : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str ) ->Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCamelCase , _UpperCamelCase , lines=_UpperCamelCase , orient=_UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case_ = load_json(_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(_UpperCamelCase ) == 1_0
def snake_case__( self : List[str] , _UpperCamelCase : Union[str, Any] ) ->Union[str, Any]:
with pytest.raises(_UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCamelCase , _UpperCamelCase , num_proc=0 )
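    # Compression round-trip: the decompressed writer output must match the reference JSON Lines file.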
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def snake_case__( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] ) ->List[Any]:
snake_case_ = tmp_path_factory.mktemp('''data''' ) / f'''test.json.{extension}'''
snake_case_ = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(_UpperCamelCase , _UpperCamelCase , compression=_UpperCamelCase ).write()
with fsspec.open(_UpperCamelCase , '''rb''' , compression='''infer''' ) as f:
snake_case_ = f.read()
with fsspec.open(_UpperCamelCase , '''rb''' , compression='''infer''' ) as f:
snake_case_ = f.read()
        assert exported_content == original_content
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not nums:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
    doctest.testmod()
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case_ ( __A , __A ):
'''simple docstring'''
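    # Maps CLIP image/text embeddings to the two extra conditioning signals used by the unCLIP decoder:
    # an additive time embedding and a handful of extra cross-attention context tokens.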
@register_to_config
    def __init__( self , *, clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 7_6_8 , time_embed_dim: int , cross_attention_dim , ):
        super().__init__()
        # learned embedding used in place of the image embedding on the classifier-free (unconditional) branch
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : List[str] ) ->str:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
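    # The same metrics script is exercised four ways below: single-process CPU, multi-process CPU,
    # a single GPU, and multiple GPUs via torchrun.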
@require_cpu
def snake_case__( self : str ) ->int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case__( self : Union[str, Any] ) ->Any:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case__( self : List[Any] ) ->Tuple:
self.test_metrics.main()
@require_multi_gpu
def snake_case__( self : Any ) ->Union[str, Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_8_4
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_2_8
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def snake_case__( self : Optional[Any] ) ->Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
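    # Each create_and_check_* method below instantiates one model head on the shared config
    # and asserts the shape of its output logits or hidden states.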
def snake_case__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) ->Optional[int]:
snake_case_ = TFConvBertModel(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = [input_ids, input_mask]
snake_case_ = model(_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ) ->Optional[int]:
snake_case_ = TFConvBertForMaskedLM(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : List[Any] ) ->Dict:
snake_case_ = self.num_labels
snake_case_ = TFConvBertForSequenceClassification(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] ) ->str:
snake_case_ = self.num_choices
snake_case_ = TFConvBertForMultipleChoice(config=_UpperCamelCase )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__( self : Any , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] ) ->str:
snake_case_ = self.num_labels
snake_case_ = TFConvBertForTokenClassification(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] ) ->Tuple:
snake_case_ = TFConvBertForQuestionAnswering(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class snake_case_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : Dict = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : int = False
def snake_case__( self : str ) ->Union[str, Any]:
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=3_7 )
def snake_case__( self : List[str] ) ->Optional[Any]:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def snake_case__( self : str ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def snake_case__( self : int ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def snake_case__( self : str ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
snake_case_ = True
if hasattr(_UpperCamelCase , '''use_cache''' ):
snake_case_ = True
snake_case_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
snake_case_ = getattr(self.model_tester , '''key_length''' , _UpperCamelCase )
for model_class in self.all_model_classes:
snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = len(model(_UpperCamelCase ) )
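            # Round-trip through the TF SavedModel format and check that hidden states and attentions
            # keep their expected shapes after reloading.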
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase , saved_model=_UpperCamelCase )
snake_case_ = os.path.join(_UpperCamelCase , '''saved_model''' , '''1''' )
snake_case_ = tf.keras.models.load_model(_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
if self.is_encoder_decoder:
snake_case_ = outputs['''encoder_hidden_states''']
snake_case_ = outputs['''encoder_attentions''']
else:
snake_case_ = outputs['''hidden_states''']
snake_case_ = outputs['''attentions''']
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
snake_case_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def snake_case__( self : Union[str, Any] ) ->Tuple:
snake_case_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : List[str] ) ->Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , '''key_length''' , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , '''key_length''' , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ = len(_UpperCamelCase )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
if self.is_encoder_decoder:
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_decoder_attentions_output(_UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : Any ) ->int:
snake_case_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = [1, 6, 7_6_8]
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 )
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "informer"
SCREAMING_SNAKE_CASE : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Dict , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "student_t" , _UpperCamelCase : str = "nll" , _UpperCamelCase : int = 1 , _UpperCamelCase : List[int] = None , _UpperCamelCase : Optional[Union[str, bool]] = "mean" , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : int = 6_4 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : bool = True , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.05 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 1_0_0 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : Dict=True , _UpperCamelCase : str = "prob" , _UpperCamelCase : int = 5 , _UpperCamelCase : bool = True , **_UpperCamelCase : Optional[Any] , ) ->Optional[int]:
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = embedding_dimension
else:
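            # default embedding size heuristic: min(50, (cardinality + 1) // 2) per categorical feature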
snake_case_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
        # Informer-specific parameters: ProbSparse attention type, sampling factor, and encoder distilling
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def snake_case__( self : Optional[Any] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = "biogpt"
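    # The defaults below match the microsoft/biogpt checkpoint (42,384-token vocab, 24 layers, 16 heads).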
    def __init__( self , vocab_size=4_2_3_8_4 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
import cmath
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
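    # Complex apparent power from magnitudes and phase angles in degrees,
    # e.g. apparent_power(100, 5, 0, 0) -> (500+0j); a 90-degree current shift gives approximately 500j (purely reactive).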
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
# Convert voltage and current to rectangular form
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
    doctest.testmod()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = "dandelin/vilt-b32-finetuned-vqa"
SCREAMING_SNAKE_CASE : Any = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
SCREAMING_SNAKE_CASE : Tuple = "image_qa"
SCREAMING_SNAKE_CASE : str = AutoProcessor
SCREAMING_SNAKE_CASE : int = AutoModelForVisualQuestionAnswering
SCREAMING_SNAKE_CASE : str = ["image", "text"]
SCREAMING_SNAKE_CASE : Optional[int] = ["text"]
def __init__( self : List[Any] , *_UpperCamelCase : Dict , **_UpperCamelCase : List[Any] ) ->int:
requires_backends(self , ['''vision'''] )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
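    # encode -> forward -> decode: the processor builds the ViLT inputs, the model scores every candidate
    # answer in its label space, and decode maps the best logit back to a string.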
def snake_case__( self : Optional[int] , _UpperCamelCase : "Image" , _UpperCamelCase : str ) ->Union[str, Any]:
return self.pre_processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' )
def snake_case__( self : Any , _UpperCamelCase : Union[str, Any] ) ->str:
with torch.no_grad():
return self.model(**_UpperCamelCase ).logits
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict ) ->Any:
snake_case_ = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def snake_case__( self : str ) ->List[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] ) ->Tuple:
return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=True , initializer_range=self.initializer_range , )
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
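    # Same past_key_values consistency check as above, but appending three new tokens at once.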
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
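        # GPT-2 style scaled initialization: residual (c_proj) weights are expected to be drawn with
        # std = initializer_range / sqrt(2 * num_hidden_layers)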
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
def snake_case__( self : List[str] ) ->Union[str, Any]:
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = '''left'''
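        # decoder-only models need left padding for batched generation so new tokens stay aligned at the sequence end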
        # Define PAD Token = EOS Token (BioGPT has no dedicated padding token)
snake_case_ = tokenizer.eos_token
snake_case_ = model.config.eos_token_id
# use different length sentences to test batching
snake_case_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
snake_case_ = inputs['''input_ids'''].to(_UpperCamelCase )
snake_case_ = model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
snake_case_ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase )
snake_case_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
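        # shorten max_length for the unpadded short sentence so it generates the same number of new tokens as in the batched run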
snake_case_ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
        self.assertEqual(_UpperCamelCase , _UpperCamelCase )
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case_ :
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : int=3_2 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : List[str]=1_0 , _UpperCamelCase : Tuple=[1_0, 2_0, 3_0, 4_0] , _UpperCamelCase : Optional[Any]=[1, 1, 2, 1] , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[Any]="relu" , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=None , ) ->str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = embeddings_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = len(_UpperCamelCase )
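        # the number of stages is taken from the length of the stage configuration (hidden_sizes / depths)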
def snake_case__( self : Tuple ) ->Any:
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def snake_case__( self : Optional[int] ) ->Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def snake_case__( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]:
snake_case_ = RegNetModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) ->List[str]:
snake_case_ = self.num_labels
snake_case_ = RegNetForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__( self : Tuple ) ->int:
snake_case_ = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Tuple = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Optional[int] = False
def snake_case__( self : str ) ->Dict:
snake_case_ = RegNetModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def snake_case__( self : Tuple ) ->Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__( self : str ) ->Tuple:
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def snake_case__( self : Optional[Any] ) ->str:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def snake_case__( self : Any ) ->List[Any]:
pass
def snake_case__( self : Any ) ->Optional[Any]:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : List[str] ) ->Tuple:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(config=_UpperCamelCase )
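            # every normalization layer in a freshly initialized model should be the identity: weight == 1, bias == 0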
for name, module in model.named_modules():
if isinstance(_UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def snake_case__( self : int ) ->Optional[int]:
def check_hidden_states_output(_UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ):
snake_case_ = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case_ = layer_type
snake_case_ = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def snake_case__( self : str ) ->Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def snake_case__( self : Dict ) ->str:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = RegNetModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ():
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__( self : Optional[int] ) ->Tuple:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case__( self : Dict ) ->str:
snake_case_ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCamelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCamelCase )
# verify the logits
snake_case_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
snake_case_ = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(_UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
snake_case_ = (boundary[1] - boundary[0]) / steps
snake_case_ = boundary[0]
snake_case_ = boundary[1]
snake_case_ = make_points(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = 0.0
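    # the two endpoints are weighted h/2; every interior sample point is weighted h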
y += (h / 2.0) * f(SCREAMING_SNAKE_CASE__ )
for i in x_i:
y += h * f(SCREAMING_SNAKE_CASE__ )
y += (h / 2.0) * f(SCREAMING_SNAKE_CASE__ )
return y
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
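    # yields the interior sample points a + h, a + 2h, ... while they remain strictly below b - h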
snake_case_ = a + h
while x < (b - h):
yield x
snake_case_ = x + h
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # enter your function here
snake_case_ = (x - 0) * (x - 0)
return y
def __SCREAMING_SNAKE_CASE ():
snake_case_ = 0.0 # Lower bound of integration
snake_case_ = 1.0 # Upper bound of integration
snake_case_ = 10.0 # define number of steps or resolution
snake_case_ = [a, b] # define boundary of integration
snake_case_ = method_a(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(F'''y = {y}''' )
if __name__ == "__main__":
    main()
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = "layoutlmv3"
def __init__( self : int , _UpperCamelCase : Union[str, Any]=5_0_2_6_5 , _UpperCamelCase : Optional[int]=7_6_8 , _UpperCamelCase : Any=1_2 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Any=3_0_7_2 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : Any=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[Any]=5_1_2 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : str=1e-5 , _UpperCamelCase : Optional[Any]=1 , _UpperCamelCase : Tuple=0 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : Dict=1_0_2_4 , _UpperCamelCase : Tuple=1_2_8 , _UpperCamelCase : List[str]=1_2_8 , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=3_2 , _UpperCamelCase : Any=1_2_8 , _UpperCamelCase : Dict=6_4 , _UpperCamelCase : Optional[Any]=2_5_6 , _UpperCamelCase : int=True , _UpperCamelCase : Any=True , _UpperCamelCase : str=True , _UpperCamelCase : List[Any]=2_2_4 , _UpperCamelCase : int=3 , _UpperCamelCase : str=1_6 , _UpperCamelCase : Optional[int]=None , **_UpperCamelCase : List[Any] , ) ->Any:
super().__init__(
vocab_size=_UpperCamelCase , hidden_size=_UpperCamelCase , num_hidden_layers=_UpperCamelCase , num_attention_heads=_UpperCamelCase , intermediate_size=_UpperCamelCase , hidden_act=_UpperCamelCase , hidden_dropout_prob=_UpperCamelCase , attention_probs_dropout_prob=_UpperCamelCase , max_position_embeddings=_UpperCamelCase , type_vocab_size=_UpperCamelCase , initializer_range=_UpperCamelCase , layer_norm_eps=_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = max_ad_position_embeddings
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = has_relative_attention_bias
snake_case_ = rel_pos_bins
snake_case_ = max_rel_pos
snake_case_ = has_spatial_attention_bias
snake_case_ = rel_ad_pos_bins
snake_case_ = max_rel_ad_pos
snake_case_ = text_embed
snake_case_ = visual_embed
snake_case_ = input_size
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = classifier_dropout
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = version.parse("1.12" )
@property
def snake_case__( self : str ) ->Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def snake_case__( self : Any ) ->float:
return 1e-5
@property
def snake_case__( self : str ) ->int:
return 1_2
def snake_case__( self : Tuple , _UpperCamelCase : "ProcessorMixin" , _UpperCamelCase : int = -1 , _UpperCamelCase : int = -1 , _UpperCamelCase : bool = False , _UpperCamelCase : Optional["TensorType"] = None , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 4_0 , _UpperCamelCase : int = 4_0 , ) ->Mapping[str, Any]:
setattr(processor.image_processor , '''apply_ocr''' , _UpperCamelCase )
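        # OCR is overridden here so the dummy words and boxes built below feed the tokenizer instead of OCR output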
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ = compute_effective_axis_dimension(
_UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = processor.tokenizer.num_special_tokens_to_add(_UpperCamelCase )
snake_case_ = compute_effective_axis_dimension(
_UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
snake_case_ = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
snake_case_ = self._generate_dummy_images(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case_ = dict(
processor(
_UpperCamelCase , text=_UpperCamelCase , boxes=_UpperCamelCase , return_tensors=_UpperCamelCase , ) )
        return inputs
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / '''model_card_template.md'''
lowerCAmelCase_ = uuida().hex
lowerCAmelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None ):
snake_case_ = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + user_agent
return ua
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if token is None:
snake_case_ = HfFolder.get_token()
if organization is None:
snake_case_ = whoami(SCREAMING_SNAKE_CASE__ )['''name''']
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(SCREAMING_SNAKE_CASE__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case_ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , '''hub_token''' ) else None
snake_case_ = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
snake_case_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
snake_case_ = os.path.join(args.output_dir , '''README.md''' )
model_card.save(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
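    # extract the commit hash from a resolved cache path of the form .../snapshots/<commit_hash>/<file>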
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case_ = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() )
snake_case_ = re.search(R'''snapshots/([^/]+)/''' , SCREAMING_SNAKE_CASE__ )
if search is None:
return None
snake_case_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, '''diffusers''')
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if new_cache_dir is None:
snake_case_ = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case_ = old_diffusers_cache
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
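    # move every blob file into the new cache layout and leave a symlink behind so the old path keeps working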
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case_ = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ )
new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
try:
os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
lowerCAmelCase_ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_ = int(f.read())
except ValueError:
lowerCAmelCase_ = 0
if cache_version < 1:
lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
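    # e.g. "diffusion_pytorch_model.bin" with variant "fp16" -> "diffusion_pytorch_model.fp16.bin"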
if variant is not None:
snake_case_ = weights_name.split('''.''' )
snake_case_ = splits[:-1] + [variant] + splits[-1:]
snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ )
return weights_name
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , *,
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ):
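    # Resolve a weights file from (1) a direct file path, (2) a local directory (optionally with a subfolder),
    # or (3) a Hub download, including the deprecated revision-as-variant fallback below.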
snake_case_ = str(SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
# Load from a PyTorch checkpoint
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
                F'''containing a file named {weights_name}''' )
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = KandinskyVaaPriorPipeline
SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
SCREAMING_SNAKE_CASE : str = ["prompt", "negative_prompt"]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE : Any = False
@property
def snake_case__( self : int ) ->Optional[Any]:
return 3_2
@property
def snake_case__( self : str ) ->str:
return 3_2
@property
def snake_case__( self : str ) ->Dict:
return self.time_input_dim
@property
def snake_case__( self : int ) ->Tuple:
return self.time_input_dim * 4
@property
def snake_case__( self : List[str] ) ->Tuple:
return 1_0_0
@property
def snake_case__( self : Any ) ->Tuple:
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def snake_case__( self : Optional[int] ) ->Optional[int]:
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_UpperCamelCase )
@property
def snake_case__( self : Tuple ) ->Optional[Any]:
torch.manual_seed(0 )
snake_case_ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_2,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
snake_case_ = PriorTransformer(**_UpperCamelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
snake_case_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case__( self : Dict ) ->Dict:
torch.manual_seed(0 )
snake_case_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
snake_case_ = CLIPVisionModelWithProjection(_UpperCamelCase )
return model
@property
def snake_case__( self : int ) ->List[Any]:
snake_case_ = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=_UpperCamelCase , do_normalize=_UpperCamelCase , do_resize=_UpperCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
def snake_case__( self : str ) ->Optional[int]:
snake_case_ = self.dummy_prior
snake_case_ = self.dummy_image_encoder
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_image_processor
snake_case_ = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=_UpperCamelCase , clip_sample_range=10.0 , )
snake_case_ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def snake_case__( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Dict=0 ) ->Union[str, Any]:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def snake_case__( self : Any ) ->List[Any]:
snake_case_ = '''cpu'''
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
snake_case_ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
snake_case_ = output.image_embeds
snake_case_ = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
snake_case_ = image[0, -1_0:]
snake_case_ = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
snake_case_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case__( self : Any ) ->Optional[int]:
snake_case_ = torch_device == '''cpu'''
snake_case_ = True
snake_case_ = False
self._test_inference_batch_single_identical(
test_max_difference=_UpperCamelCase , relax_max_difference=_UpperCamelCase , test_mean_pixel_difference=_UpperCamelCase , )
@skip_mps
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = torch_device == '''cpu'''
snake_case_ = False
self._test_attention_slicing_forward_pass(
            test_max_difference=_UpperCamelCase , test_mean_pixel_difference=_UpperCamelCase , )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
        return output
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''file.csv'''
snake_case_ = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''malformed_file.csv'''
snake_case_ = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''csv_with_image.csv'''
snake_case_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''csv_with_label.csv'''
snake_case_ = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''csv_with_int_list.csv'''
snake_case_ = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = Csv()
snake_case_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
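    # consuming the generator must raise on the malformed row, and the error log should name the offending file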
with pytest.raises(SCREAMING_SNAKE_CASE__ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(SCREAMING_SNAKE_CASE__ ) in record.message
for record in caplog.records )
@require_pil
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as f:
snake_case_ = f.read().splitlines()[1]
snake_case_ = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
snake_case_ = csv._generate_tables([[csv_file_with_image]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
snake_case_ = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as f:
snake_case_ = f.read().splitlines()[1:]
snake_case_ = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
snake_case_ = csv._generate_tables([[csv_file_with_label]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
snake_case_ = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(label ) for label in labels]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    snake_case_ = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda SCREAMING_SNAKE_CASE__ : [int(i ) for i in SCREAMING_SNAKE_CASE__.split()]} )
snake_case_ = csv._generate_tables([[csv_file_with_int_list]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
snake_case_ = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = '''pytorch_model.bin'''
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "A csv or a json file containing the validation data."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "The name of the task to train on."} , )
SCREAMING_SNAKE_CASE : Optional[List[str]] = dataclasses.field(
default=__A , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Maximum number of self-training iterations to run."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=__A , metadata={"help": "Random seed for initialization."} , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
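    # Builds the pseudo-labeled training set for the next self-training round: the inference inputs are
    # joined column-wise with the model predictions, optionally filtered by confidence or trimmed to the
    # top fraction implied by the validation score, then relabeled, shuffled and written to train_pseudo.<ext>.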
snake_case_ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
snake_case_ = dataset.filter(lambda SCREAMING_SNAKE_CASE__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
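        # keep the top eval_result fraction of rows, ranked by predicted probability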
snake_case_ = int(eval_result * len(SCREAMING_SNAKE_CASE__ ) )
print(SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.sort('''probability''' , reverse=SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
snake_case_ = dataset.remove_columns(['''label''', '''probability'''] )
snake_case_ = dataset.rename_column('''prediction''' , '''label''' )
snake_case_ = dataset.map(lambda SCREAMING_SNAKE_CASE__ : {"label": idalabel[example["label"]]} )
snake_case_ = dataset.shuffle(seed=args.seed )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ )
else:
dataset.to_json(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
snake_case_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case_ = STModelArguments(model_name_or_path=SCREAMING_SNAKE_CASE__ )
snake_case_ = STDataArguments(train_file=SCREAMING_SNAKE_CASE__ , infer_file=SCREAMING_SNAKE_CASE__ )
snake_case_ = STTrainingArguments(output_dir=SCREAMING_SNAKE_CASE__ )
snake_case_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(SCREAMING_SNAKE_CASE__ ).items():
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for key, value in kwargs.items():
if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Sanity checks
snake_case_ = {}
snake_case_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case_ = args.train_file
snake_case_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case_ = args.eval_file
for key in data_files:
snake_case_ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
snake_case_ = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
snake_case_ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
snake_case_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = None
snake_case_ = None
snake_case_ = 0
snake_case_ = False
# Show the progress bar
snake_case_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case_ = data_dir_format(SCREAMING_SNAKE_CASE__ )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-1''' )
snake_case_ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
arguments_dict.update({key: value} )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-2''' )
# Update arguments_dict
snake_case_ = model_path
snake_case_ = data_files['''train''']
snake_case_ = current_output_dir
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = iteration
snake_case_ = data_dir_format(iteration + 1 )
snake_case_ = AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' ) )
snake_case_ = config.idalabel
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-checkpoint.json''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = float(json.load(SCREAMING_SNAKE_CASE__ )[args.eval_metric] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Loading the dataset from local csv or json files.
snake_case_ = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
snake_case_ = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case_ = eval_result
if best_iteration is None:
snake_case_ = new_iteration
snake_case_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case_ = new_iteration
snake_case_ = new_eval_result
snake_case_ = 0
else:
if new_eval_result == best_eval_result:
snake_case_ = new_iteration
snake_case_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , SCREAMING_SNAKE_CASE__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , ) | 39 | 1 |
import warnings
from functools import wraps
from typing import Callable
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
@wraps(SCREAMING_SNAKE_CASE__ )
def _inner_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
(F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , SCREAMING_SNAKE_CASE__ , )
return fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return _inner_fn | 39 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AltDiffusionPipeline
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__( self : Dict ) ->int:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
snake_case_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
snake_case_ = CLIPTextModel(_UpperCamelCase )
snake_case_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
snake_case_ = 7_7
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=0 ) ->Any:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Dict ) ->List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : List[str] ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__( self : Dict ) ->Any:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''A photo of an astronaut'''
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : int ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[str] ) ->Tuple:
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''numpy''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 39 | 1 |
from math import factorial
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 20 ):
snake_case_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case_ = n // 2
return int(factorial(SCREAMING_SNAKE_CASE__ ) / (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowerCAmelCase_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''') | 39 |
from math import factorial
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(SCREAMING_SNAKE_CASE__ ) // (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
) | 39 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = LongformerTokenizer
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Any = LongformerTokenizerFast
SCREAMING_SNAKE_CASE : str = True
def snake_case__( self : List[str] ) ->Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
snake_case_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case_ = {'''unk_token''': '''<unk>'''}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCamelCase ) )
def snake_case__( self : Union[str, Any] , **_UpperCamelCase : int ) ->Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def snake_case__( self : int , **_UpperCamelCase : Dict ) ->List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : Optional[Any] ) ->str:
snake_case_ = '''lower newer'''
snake_case_ = '''lower newer'''
return input_text, output_text
def snake_case__( self : List[Any] ) ->int:
snake_case_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ = '''lower newer'''
snake_case_ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case_ = tokenizer.tokenize(_UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Any:
snake_case_ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def snake_case__( self : Optional[int] ) ->int:
snake_case_ = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
snake_case_ = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
snake_case_ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def snake_case__( self : Any ) ->Tuple:
snake_case_ = self.get_tokenizer()
snake_case_ = '''Encode this sequence.'''
snake_case_ = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
# Testing spaces after special tokens
snake_case_ = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )} ) # mask token has a left space
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
snake_case_ = '''Encode <mask> sequence'''
snake_case_ = '''Encode <mask>sequence'''
snake_case_ = tokenizer.encode(_UpperCamelCase )
snake_case_ = encoded.index(_UpperCamelCase )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = tokenizer.encode(_UpperCamelCase )
snake_case_ = encoded.index(_UpperCamelCase )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
pass
def snake_case__( self : List[Any] ) ->str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = '''A, <mask> AllenNLP sentence.'''
snake_case_ = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
snake_case_ = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
snake_case_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
snake_case_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_UpperCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_UpperCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def snake_case__( self : str ) ->int:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _UpperCamelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , _UpperCamelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , _UpperCamelCase )
def snake_case__( self : str ) ->Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ = f'''{text_of_1_token} {text_of_1_token}'''
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
snake_case_ = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , ) | 39 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : bool
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 42
SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : Optional[bool] = None
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "titi"
SCREAMING_SNAKE_CASE : Any = "toto"
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = "titi"
SCREAMING_SNAKE_CASE : Optional[Any] = "toto"
SCREAMING_SNAKE_CASE : Any = 42
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def snake_case__( self : Tuple ) ->List[str]:
snake_case_ = BasicEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = field()
SCREAMING_SNAKE_CASE : str = field()
SCREAMING_SNAKE_CASE : BasicEnum = field()
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : "BasicEnum" = field()
SCREAMING_SNAKE_CASE : "Optional[bool]" = None
SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int | None = None
SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : str | None = None
SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def snake_case__( self : str ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase ) | 39 | 1 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case_ = str(bin(SCREAMING_SNAKE_CASE__ ) )
binary_number += "0" * shift_amount
return binary_number
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case_ = str(bin(SCREAMING_SNAKE_CASE__ ) )[2:]
if shift_amount >= len(SCREAMING_SNAKE_CASE__ ):
return "0b0"
snake_case_ = binary_number[: len(SCREAMING_SNAKE_CASE__ ) - shift_amount]
return "0b" + shifted_binary_number
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number >= 0: # Get binary representation of positive number
snake_case_ = '''0''' + str(bin(SCREAMING_SNAKE_CASE__ ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
snake_case_ = len(bin(SCREAMING_SNAKE_CASE__ )[3:] ) # Find 2's complement of number
snake_case_ = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
snake_case_ = (
'''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ )) + binary_number
)
if shift_amount >= len(SCREAMING_SNAKE_CASE__ ):
return "0b" + binary_number[0] * len(SCREAMING_SNAKE_CASE__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(SCREAMING_SNAKE_CASE__ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Dict , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 39 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU. The "platform" allocator
    # makes JAX allocate exactly what is needed on demand and deallocate memory
    # that is no longer needed, but it is slower, as explained here:
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ):
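    # Build default masks when none are provided: the attention masks zero out
    # pad tokens; the all-ones head masks are prepared but not returned here.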
if attention_mask is None:
snake_case_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
snake_case_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
snake_case_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class snake_case_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Tuple=1_3 , _UpperCamelCase : Union[str, Any]=7 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=False , _UpperCamelCase : str=9_9 , _UpperCamelCase : List[str]=1_6 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Union[str, Any]=4 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Union[str, Any]=3_2 , _UpperCamelCase : int=2 , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : Union[str, Any]=0.02 , ) ->Union[str, Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = initializer_range
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
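        # Clamp random input ids to [3, vocab_size], append an EOS column (id 2),
        # and derive decoder inputs by shifting right with pad=1 and start=2.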
snake_case_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
snake_case_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
snake_case_ = shift_tokens_right(_UpperCamelCase , 1 , 2 )
snake_case_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , )
snake_case_ = prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def snake_case__( self : List[Any] ) ->int:
snake_case_, snake_case_ = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] ) ->Optional[int]:
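        # Incremental decoding with an init_cache'd past should match a single
        # full forward pass; the final-position logits of both are compared.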
snake_case_ = 2_0
snake_case_ = model_class_name(_UpperCamelCase )
snake_case_ = model.encode(inputs_dict['''input_ids'''] )
snake_case_, snake_case_ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
snake_case_ = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
snake_case_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
snake_case_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case_ = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
snake_case_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
snake_case_ = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , )
snake_case_ = model.decode(_UpperCamelCase , _UpperCamelCase )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] ) ->Tuple:
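        # Same cached-decoding equivalence check as above, but with an explicit
        # decoder attention mask zero-padded out to the maximum decoder length.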
snake_case_ = 2_0
snake_case_ = model_class_name(_UpperCamelCase )
snake_case_ = model.encode(inputs_dict['''input_ids'''] )
snake_case_, snake_case_ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
snake_case_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
snake_case_ = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
snake_case_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case_ = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
snake_case_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
snake_case_ = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
snake_case_ = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 99
def snake_case__( self : Tuple ) ->Any:
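        # A fixed batch of token ids: every row ends with EOS (id 2) except the
        # padded row, plus a tiny Blenderbot config sized to match.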
snake_case_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
snake_case_ = input_ids.shape[0]
snake_case_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case__( self : Any ) ->Tuple:
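        # The LM head should emit one vocab-sized logit vector per input position.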
snake_case_, snake_case_, snake_case_ = self._get_config_and_data()
snake_case_ = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
snake_case_ = lm_model(input_ids=_UpperCamelCase )
snake_case_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Optional[Any]:
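        # With uneven lengths, the logits should follow the decoder (summary)
        # sequence length, not the encoder input length.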
snake_case_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
snake_case_ = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
snake_case_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
snake_case_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
snake_case_ = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
snake_case_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def snake_case__( self : Tuple ) ->List[str]:
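        # shift_tokens_right should keep the shape, place the decoder start token
        # (id 2) in the first column, and reduce the total pad count by one.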
snake_case_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
snake_case_ = shift_tokens_right(_UpperCamelCase , 1 , 2 )
snake_case_ = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
snake_case_ = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class snake_case_ ( __A , unittest.TestCase , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE : List[str] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def snake_case__( self : List[str] ) ->Dict:
snake_case_ = FlaxBlenderbotModelTester(self )
def snake_case__( self : int ) ->Any:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[Any] ) ->Optional[int]:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model_class(_UpperCamelCase )
@jax.jit
def encode_jitted(_UpperCamelCase : str , _UpperCamelCase : Union[str, Any]=None , **_UpperCamelCase : List[str] ):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
with self.subTest('''JIT Enabled''' ):
snake_case_ = encode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case_ = encode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__( self : List[str] ) ->Any:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
snake_case_ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : List[Any] ):
return model.decode(
decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
with self.subTest('''JIT Enabled''' ):
snake_case_ = decode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case_ = decode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__( self : Dict ) ->Optional[Any]:
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# Blenderbot models expect an eos token at the end of input_ids
snake_case_ = np.ones((1, 1) ) * model.config.eos_token_id
snake_case_ = model(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
snake_case_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
snake_case_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_UpperCamelCase )
snake_case_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
snake_case_ = ['''Sam''']
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''jax''' )
snake_case_ = model.generate(**_UpperCamelCase , **_UpperCamelCase )
snake_case_ = '''Sam is a great name. It means "sun" in Gaelic.'''
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase )
assert generated_txt[0].strip() == tgt_text
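# A minimal numpy sketch of the shift-right convention exercised by the
# shift_tokens_right test in this file: prepend decoder_start_token_id and drop
# the last token. This mirrors the behaviour the test asserts; treat it as an
# illustration, not the library implementation.
import numpy as np

def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # labels marked -100 (ignored positions) become padding in the decoder input
    return np.where(shifted == -100, pad_token_id, shifted)

example = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
assert (shift_tokens_right_sketch(example, pad_token_id=1, decoder_start_token_id=2)[:, 0] == 2).all()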
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "rwkv"
SCREAMING_SNAKE_CASE : Any = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , _UpperCamelCase : Any=5_0_2_7_7 , _UpperCamelCase : Optional[int]=1_0_2_4 , _UpperCamelCase : Optional[int]=4_0_9_6 , _UpperCamelCase : str=3_2 , _UpperCamelCase : Tuple=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=1e-5 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=0 , _UpperCamelCase : int=6 , _UpperCamelCase : Dict=False , _UpperCamelCase : Optional[int]=True , **_UpperCamelCase : int , ) ->List[str]:
snake_case_ = vocab_size
snake_case_ = context_length
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case_ = layer_norm_epsilon
snake_case_ = rescale_every
snake_case_ = use_cache
snake_case_ = bos_token_id
snake_case_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
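# A usage sketch for the configuration class above, assuming it corresponds to
# transformers.RwkvConfig (class names are mangled to `snake_case_` in this dump).
# It demonstrates the derived defaults computed in __init__ when the optional
# sizes are left as None.
from transformers import RwkvConfig

config = RwkvConfig(hidden_size=1024, num_hidden_layers=24)
assert config.attention_hidden_size == config.hidden_size
assert config.intermediate_size == 4 * config.hidden_size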
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''squeezebert/squeezebert-uncased''': 5_12,
'''squeezebert/squeezebert-mnli''': 5_12,
'''squeezebert/squeezebert-mnli-headless''': 5_12,
}
lowerCAmelCase_ = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : int = SqueezeBertTokenizer
def __init__( self : List[Any] , _UpperCamelCase : int=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Any=True , _UpperCamelCase : Dict="[UNK]" , _UpperCamelCase : int="[SEP]" , _UpperCamelCase : int="[PAD]" , _UpperCamelCase : List[str]="[CLS]" , _UpperCamelCase : List[Any]="[MASK]" , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : Optional[int] , ) ->List[str]:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars
):
snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) )
snake_case_ = do_lower_case
snake_case_ = strip_accents
snake_case_ = tokenize_chinese_chars
snake_case_ = normalizer_class(**_UpperCamelCase )
snake_case_ = do_lower_case
def snake_case__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]=None ) ->Optional[int]:
snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
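# A self-contained sketch of the "[CLS] A [SEP] B [SEP]" layout and token_type_ids
# produced by the two methods above. The ids 101/102 are placeholder assumptions
# for illustration, not the model's actual vocabulary.
cls_id, sep_id = 101, 102
token_ids_a = [7, 8, 9]
token_ids_b = [4, 5]
input_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
assert len(input_ids) == len(token_type_ids) == 8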
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
class snake_case_ :
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : Optional[str] = None ) ->Tuple:
snake_case_ = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case_ = Extractor
def snake_case__( self : Any , _UpperCamelCase : str ) ->str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case_ = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : bool ) ->bool:
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : bool = False ) ->str:
snake_case_ = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
snake_case_ = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : str ) ->bool:
...
@staticmethod
@abstractmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
...
class snake_case_ ( __A , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[bytes] = []
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->List[Any]:
with open(_UpperCamelCase , '''rb''' ) as f:
return f.read(_UpperCamelCase )
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if not magic_number:
snake_case_ = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
snake_case_ = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : Any ) ->bool:
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def safemembers(members, base_path):
    # Filter tar members that would escape the extraction directory (path
    # traversal, or symlinks / hard links pointing outside `base_path`).
    def resolved(path: str) -> str:
        return os.path.realpath(os.path.abspath(path))
    def badpath(path: str, base: str) -> bool:
        # joinpath will ignore base if path is absolute
        return not resolved(os.path.join(base, path)).startswith(base)
    def badlink(info, base: str) -> bool:
        # Links are interpreted relative to the directory containing the link
        tip = resolved(os.path.join(base, os.path.dirname(info.name)))
        return badpath(info.linkname, base=tip)
    base = resolved(base_path)
    for finfo in members:
        if badpath(finfo.name, base):
            logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''')
        elif finfo.issym() and badlink(finfo, base):
            logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''')
        elif finfo.islnk() and badlink(finfo, base):
            logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''')
        else:
            yield finfo
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [b"\x1F\x8B"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with gzip.open(_UpperCamelCase , '''rb''' ) as gzip_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def snake_case__( cls : List[str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , '''rb''' ) as fp:
snake_case_ = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case_ = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
snake_case_ = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
snake_case_ = zstd.ZstdDecompressor()
with open(_UpperCamelCase , '''rb''' ) as ifh, open(_UpperCamelCase , '''wb''' ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"\x42\x5A\x68"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with bz2.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import py7zr
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with py7zr.SevenZipFile(_UpperCamelCase , '''r''' ) as archive:
archive.extractall(_UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x04\x22\x4D\x18"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lz4.frame
with lz4.frame.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case__( cls : List[Any] ) ->List[str]:
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def snake_case__( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bool = False ) ->bool:
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case__( cls : int , _UpperCamelCase : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
snake_case_ = cls._get_magic_number_max_length()
snake_case_ = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ) ->None:
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
snake_case_ = str(Path(_UpperCamelCase ).with_suffix('''.lock''' ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = extractor if extractor != '''deprecated''' else extractor_format
else:
snake_case_ = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
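# A standalone sketch of the magic-number detection idea implemented above: read a
# few leading bytes and match them against known signatures. The table only covers
# formats whose signatures appear in this file.
MAGIC_SIGNATURES = {
    b"\x1F\x8B": "gzip",
    b"PK\x03\x04": "zip",
    b"\x42\x5A\x68": "bz2",
    b"\xFD\x37\x7A\x58\x5A\x00": "xz",
    b"\x28\xB5\x2F\xFD": "zstd",
}

def sniff_format(path):
    with open(path, "rb") as f:
        head = f.read(max(len(magic) for magic in MAGIC_SIGNATURES))
    for magic, name in MAGIC_SIGNATURES.items():
        if head.startswith(magic):
            return name
    return None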
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = UnCLIPImageVariationPipeline
SCREAMING_SNAKE_CASE : Optional[int] = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
SCREAMING_SNAKE_CASE : Optional[Any] = IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE : List[Any] = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
SCREAMING_SNAKE_CASE : List[str] = False
@property
def snake_case__( self : Optional[Any] ) ->int:
return 3_2
@property
def snake_case__( self : List[Any] ) ->List[str]:
return 3_2
@property
def snake_case__( self : Optional[Any] ) ->int:
return self.time_input_dim
@property
def snake_case__( self : List[str] ) ->Union[str, Any]:
return self.time_input_dim * 4
@property
def snake_case__( self : Any ) ->Tuple:
return 1_0_0
@property
def snake_case__( self : List[Any] ) ->int:
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def snake_case__( self : List[str] ) ->List[Any]:
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_UpperCamelCase )
@property
def snake_case__( self : int ) ->Optional[Any]:
torch.manual_seed(0 )
snake_case_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(_UpperCamelCase )
@property
def snake_case__( self : Optional[Any] ) ->str:
torch.manual_seed(0 )
snake_case_ = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case_ = UnCLIPTextProjModel(**_UpperCamelCase )
return model
@property
def snake_case__( self : Union[str, Any] ) ->Union[str, Any]:
torch.manual_seed(0 )
snake_case_ = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case_ = UNetaDConditionModel(**_UpperCamelCase )
return model
@property
def snake_case__( self : List[Any] ) ->Any:
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def snake_case__( self : str ) ->Union[str, Any]:
torch.manual_seed(0 )
snake_case_ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def snake_case__( self : Dict ) ->str:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case_ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def snake_case__( self : List[Any] ) ->Optional[int]:
snake_case_ = self.dummy_decoder
snake_case_ = self.dummy_text_proj
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_super_res_first
snake_case_ = self.dummy_super_res_last
snake_case_ = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case_ = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case_ = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
snake_case_ = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str=0 , _UpperCamelCase : Any=True ) ->Dict:
snake_case_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
if pil_image:
snake_case_ = input_image * 0.5 + 0.5
snake_case_ = input_image.clamp(0 , 1 )
snake_case_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case_ = DiffusionPipeline.numpy_to_pil(_UpperCamelCase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = '''cpu'''
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
snake_case_ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
snake_case_ = pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
snake_case_ = pipe(
**_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->int:
snake_case_ = '''cpu'''
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
snake_case_ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
snake_case_ = pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
snake_case_ = pipe(
**_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Any ) ->List[str]:
snake_case_ = '''cpu'''
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
snake_case_ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
snake_case_ = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case_ = pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
snake_case_ = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case_ = pipe(
**_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
snake_case_ = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = torch.device('''cpu''' )
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
snake_case_ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
snake_case_ = pipe.decoder.dtype
snake_case_ = 1
snake_case_ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case_ = pipe.prepare_latents(
_UpperCamelCase , dtype=_UpperCamelCase , device=_UpperCamelCase , generator=_UpperCamelCase , latents=_UpperCamelCase , scheduler=DummyScheduler() )
snake_case_ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case_ = pipe.prepare_latents(
_UpperCamelCase , dtype=_UpperCamelCase , device=_UpperCamelCase , generator=_UpperCamelCase , latents=_UpperCamelCase , scheduler=DummyScheduler() )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
snake_case_ = pipe(
**_UpperCamelCase , decoder_latents=_UpperCamelCase , super_res_latents=_UpperCamelCase ).images
snake_case_ = self.get_dummy_inputs(_UpperCamelCase , pil_image=_UpperCamelCase )
# Don't pass image, instead pass embedding
snake_case_ = pipeline_inputs.pop('''image''' )
snake_case_ = pipe.image_encoder(_UpperCamelCase ).image_embeds
snake_case_ = pipe(
**_UpperCamelCase , decoder_latents=_UpperCamelCase , super_res_latents=_UpperCamelCase , image_embeddings=_UpperCamelCase , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case_ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=_UpperCamelCase , expected_max_diff=_UpperCamelCase )
@skip_mps
def snake_case__( self : Optional[int] ) ->Optional[Any]:
snake_case_ = torch_device == '''cpu'''
snake_case_ = True
snake_case_ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=_UpperCamelCase , relax_max_difference=_UpperCamelCase , additional_params_copy_to_batched_inputs=_UpperCamelCase , )
def snake_case__( self : List[Any] ) ->List[str]:
snake_case_ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case_ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_UpperCamelCase , additional_params_copy_to_batched_inputs=_UpperCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_UpperCamelCase )
@skip_mps
def snake_case__( self : Dict ) ->Optional[int]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def snake_case__( self : Tuple ) ->List[Any]:
return super().test_save_load_local()
@skip_mps
def snake_case__( self : List[Any] ) ->Optional[Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : Any ) ->Tuple:
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case_ = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case_ = pipeline(
_UpperCamelCase , generator=_UpperCamelCase , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase , 1_5 )
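# A minimal sketch of the mean-pixel-difference check used above, assuming arrays
# of identical shape (`mean_pixel_difference_ok` is a hypothetical stand-in that
# mirrors the helper's intent, not its source).
import numpy as np

def mean_pixel_difference_ok(a, b, threshold=15):
    return np.abs(a.astype(np.float32) - b.astype(np.float32)).mean() < threshold

img = np.zeros((4, 4, 3), dtype=np.uint8)
assert mean_pixel_difference_ok(img, img + 1)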
def bead_sort(sequence):
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('''Sequence must be list of non-negative integers''')
    for _ in range(len(sequence)):
        # one "gravity" pass: any rod taller than its right neighbour sheds the difference
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
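# Extra usage sketch: per the guard above, only non-negative integers are accepted.
assert bead_sort([0, 9, 0, 3]) == [0, 0, 3, 9]
try:
    bead_sort([-1, 2])
except TypeError:
    pass  # negative values are rejected, as expected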
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]=1_4 , _UpperCamelCase : int=7 , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Any=False , _UpperCamelCase : int=True , _UpperCamelCase : List[str]=9_9 , _UpperCamelCase : Dict=3_2 , _UpperCamelCase : Dict=4 , _UpperCamelCase : Optional[int]=4 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Dict=3_7 , _UpperCamelCase : int="gelu" , _UpperCamelCase : str=0.1 , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Union[str, Any]=5_1_2 , _UpperCamelCase : Optional[int]=0.02 , ) ->List[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = rotary_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = None
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_UpperCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def snake_case__( self : Dict ) ->Union[str, Any]:
snake_case_ = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] ) ->Union[str, Any]:
snake_case_ = 2_0
snake_case_ = model_class_name(_UpperCamelCase )
snake_case_ = model.init_cache(input_ids.shape[0] , _UpperCamelCase )
snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , position_ids=_UpperCamelCase , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
snake_case_ = model(
input_ids[:, -1:] , attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , position_ids=_UpperCamelCase , )
snake_case_ = model(_UpperCamelCase )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def snake_case__( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) ->List[Any]:
snake_case_ = 2_0
snake_case_ = model_class_name(_UpperCamelCase )
snake_case_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
snake_case_ = model.init_cache(input_ids.shape[0] , _UpperCamelCase )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , position_ids=_UpperCamelCase , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
snake_case_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_UpperCamelCase , position_ids=_UpperCamelCase , )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class snake_case_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = FlaxGPTJModelTester(self )
def snake_case__( self : int ) ->str:
for model_class_name in self.all_model_classes:
snake_case_, snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[Any] ) ->str:
for model_class_name in self.all_model_classes:
snake_case_, snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@tooslow
def snake_case__( self : str ) ->Dict:
snake_case_ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
snake_case_ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=_UpperCamelCase , truncation=_UpperCamelCase )
snake_case_ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
snake_case_ = False
snake_case_ = model.config.eos_token_id
snake_case_ = jax.jit(model.generate )
snake_case_ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@is_pt_flax_cross_test
def snake_case__( self : List[str] ) ->List[str]:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(_UpperCamelCase , _UpperCamelCase )
snake_case_, snake_case_ = pt_inputs['''input_ids'''].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCamelCase ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
snake_case_ = pt_model_class(_UpperCamelCase ).eval()
snake_case_ = model_class(_UpperCamelCase , dtype=jnp.floataa )
snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _UpperCamelCase )
snake_case_ = fx_state
with torch.no_grad():
snake_case_ = pt_model(**_UpperCamelCase ).to_tuple()
snake_case_ = fx_model(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_UpperCamelCase )
snake_case_ = model_class.from_pretrained(_UpperCamelCase , from_pt=_UpperCamelCase )
snake_case_ = fx_model_loaded(**_UpperCamelCase ).to_tuple()
self.assertEqual(
len(_UpperCamelCase ) , len(_UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(_UpperCamelCase , _UpperCamelCase )
snake_case_ = pt_model_class(_UpperCamelCase ).eval()
snake_case_ = model_class(_UpperCamelCase , dtype=jnp.floataa )
snake_case_ = load_flax_weights_in_pytorch_model(_UpperCamelCase , fx_model.params )
snake_case_, snake_case_ = pt_inputs['''input_ids'''].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCamelCase ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
snake_case_ = pt_model(**_UpperCamelCase ).to_tuple()
snake_case_ = fx_model(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_UpperCamelCase )
snake_case_ = pt_model_class.from_pretrained(_UpperCamelCase , from_flax=_UpperCamelCase )
with torch.no_grad():
snake_case_ = pt_model_loaded(**_UpperCamelCase ).to_tuple()
self.assertEqual(
len(_UpperCamelCase ) , len(_UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def snake_case__( self : Optional[int] ) ->int:
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCamelCase )
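# A small numpy sketch of the position-id bookkeeping used by the cache checks
# above: the prefill step covers positions 0..L-2 for the first L-1 tokens, and
# the final cached step uses position L-1.
import numpy as np

seq_len = 7
prefill_positions = np.broadcast_to(np.arange(seq_len - 1)[None, :], (1, seq_len - 1))
last_position = np.array([[seq_len - 1]])
assert prefill_positions.shape == (1, 6) and int(last_position[0, 0]) == 6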
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('''<n>''', '''''', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : List[str]=1_3 , _UpperCamelCase : Optional[Any]=3_0 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : int=3_2 , _UpperCamelCase : Optional[int]=5 , _UpperCamelCase : Union[str, Any]=4 , _UpperCamelCase : int=3_7 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Dict=1_0 , _UpperCamelCase : Any=0.02 , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Union[str, Any]=2 , ) ->str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 1
def snake_case__( self : List[Any] ) ->Optional[int]:
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def snake_case__( self : List[str] ) ->Optional[int]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def snake_case__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any ) ->Dict:
snake_case_ = ViTModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
snake_case_ = ViTForMaskedImageModeling(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = ViTForMaskedImageModeling(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Any ) ->int:
snake_case_ = self.type_sequence_label_size
snake_case_ = ViTForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = ViTForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case__( self : List[Any] ) ->Tuple:
snake_case_ = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : str = False
def snake_case__( self : Tuple ) ->int:
snake_case_ = ViTModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : Dict ) ->Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def snake_case__( self : str ) ->int:
pass
def snake_case__( self : int ) ->Tuple:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def snake_case__( self : Any ) ->Any:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->int:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ViTModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ():
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__( self : Union[str, Any] ) ->Tuple:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def snake_case__( self : str ) ->Any:
snake_case_ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(_UpperCamelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCamelCase )
# verify the logits
snake_case_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
snake_case_ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->str:
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # which allows interpolating the pre-trained position embeddings so the model can
        # be used on higher-resolution images. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher-resolution images.
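        # e.g. facebook/dino-vits8 below uses 8x8 patches, so a 480x480 input yields
        # (480 / 8) ** 2 + 1 = 3601 tokens (including [CLS]), matching the expected shape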
snake_case_ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(_UpperCamelCase )
snake_case_ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_8_0 )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(_UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(_UpperCamelCase , interpolate_pos_encoding=_UpperCamelCase )
# verify the logits
snake_case_ = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def snake_case__( self : List[Any] ) ->Tuple:
snake_case_ = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(_UpperCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ = model(_UpperCamelCase ) | 39 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = [0 for i in range(r + 1 )]
# nc0 = 1
snake_case_ = 1
for i in range(1 , n + 1 ):
        # compute the current row from the previous row; updating right-to-left means
        # c[j - 1] still holds the previous row's value when it is added
snake_case_ = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5)) | 39 | 1 |
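# Sanity checks (standard binomial values, assumed rather than taken from the original file):
# C(5, 2) = 10 and C(10, 5) = 252.
assert binomial_coefficient(n=5, r=2) == 10
assert binomial_coefficient(n=10, r=5) == 252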
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase_ = logging.getLogger()
def __SCREAMING_SNAKE_CASE ():
snake_case_ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
snake_case_ = parser.parse_args()
return args.f
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = json.load(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def __SCREAMING_SNAKE_CASE ():
snake_case_ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Tuple ) ->Optional[int]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
snake_case_ = tempfile.mkdtemp()
snake_case_ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
snake_case_ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__( cls : List[Any] ) ->Union[str, Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : Union[str, Any] ) ->Tuple:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : List[Any] ) ->int:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : List[str] ) ->List[Any]:
        # with so little data, distributed training needs more epochs to get a score on par with a 0/1 GPU run
snake_case_ = 7 if get_gpu_count() > 1 else 2
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : List[str] ) ->List[str]:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 2_8 )
self.assertGreaterEqual(result['''eval_exact'''] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : int ) ->List[str]:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : List[Any] ) ->List[Any]:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : Union[str, Any] ) ->str:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''translation_no_trainer''' ) ) )
@slow
def snake_case__( self : List[Any] ) ->Dict:
snake_case_ = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : List[Any] ) ->Optional[int]:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
        # The base model scores about 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''image_classification_no_trainer''' ) ) ) | 39 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return torch.atana(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / math.pi * 2
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.sin(t * math.pi / 2 ) ** 2
snake_case_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
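# Endpoint check for the crash schedule above: at t = 0, sigma = 0 and alpha = 1, so
# atan2(0, 1) / pi * 2 == 0; at t = 1, sigma = 1 and alpha = 0, so atan2(1, 0) / pi * 2 == 1.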
class snake_case_ ( __A ):
'''simple docstring'''
pass
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int ) ->Optional[int]:
super().__init__()
snake_case_ = DiffusionAttnUnetaD(_UpperCamelCase , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
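    # e.g. "main.0.weight" -> "conv_1.weight", since RES_CONV_MAP["main.0"] == "conv_1"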
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
for key, value in ATTN_MAP.items():
if name.startswith(SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif name.startswith(SCREAMING_SNAKE_CASE__ ):
return [name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ):
snake_case_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
snake_case_ = 0
if string.startswith('''net.3.''' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('''net.''' ):
snake_case_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('''main.''' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = '''mid_block'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
snake_case_ = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
snake_case_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
            # up- and downsample layers don't have trainable weights
continue
snake_case_ = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = v
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
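        # e.g. a fused qkv Conv1d weight of shape (3 * C, C, 1) is split into three
        # (C, C) chunks below, dropping the trailing kernel dimension for the Linear layers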
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
main(args) | 39 | 1 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase_ = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ():
snake_case_ = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
        '''--dataset_name''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=SCREAMING_SNAKE_CASE__ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=SCREAMING_SNAKE_CASE__ , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=SCREAMING_SNAKE_CASE__ , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE__ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=SCREAMING_SNAKE_CASE__ , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
snake_case_ = parser.parse_args()
return args
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
def fn(SCREAMING_SNAKE_CASE__ ):
return tokenizer(examples['''text'''] )
return fn
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
snake_case_ = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
snake_case_ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ )
snake_case_ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ )
snake_case_ = example.SerializeToString()
records.append(SCREAMING_SNAKE_CASE__ )
return records
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
snake_case_ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit )
snake_case_ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
snake_case_ = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
snake_case_ = tokenize_function(SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
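    # For example (hypothetical sizes): with max_length=512 and 1300 concatenated
    # tokens, total_length becomes (1300 // 512) * 512 = 1024, i.e. two full chunks,
    # and the trailing 276 tokens are dropped.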
def group_texts(SCREAMING_SNAKE_CASE__ ):
# Concatenate all texts.
snake_case_ = {k: sum(examples[k] , [] ) for k in examples.keys()}
snake_case_ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
snake_case_ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
snake_case_ = {
k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
snake_case_ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 )
snake_case_ = 0
snake_case_ = 0
for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ):
snake_case_ = grouped_dataset[shard : shard + args.shard_size]
snake_case_ = len(dataset_snapshot['''input_ids'''] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
snake_case_ = get_serialized_examples(SCREAMING_SNAKE_CASE__ )
with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file:
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = serialized_examples[i]
out_file.write(SCREAMING_SNAKE_CASE__ )
print('''Wrote file {} containing {} records'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , '''w''' ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
main(args) | 39 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 39 | 1 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
    # array bounds provided by analysis; a 1 is a placeholder where no tighter deterministic bound is known
snake_case_ = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
snake_case_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(SCREAMING_SNAKE_CASE__ , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ = primes[:idx]
break
snake_case_, snake_case_ = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # a remaining odd component (d)
    # essentially, solve for d * 2 ** s == n - 1
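    # e.g. for n = 13: n - 1 = 12 = 3 * 2 ** 2, so the loop below ends with d = 3 and s = 2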
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ = False
for r in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ = pow(SCREAMING_SNAKE_CASE__ , d * 2**r , SCREAMING_SNAKE_CASE__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
def __SCREAMING_SNAKE_CASE ():
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin() | 39 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(_UpperCamelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('''inf''' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(_UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
SCREAMING_SNAKE_CASE : Optional[int] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def snake_case__( self : List[Any] ) ->Optional[int]:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 2
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_0_2, 1_0_3]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(_UpperCamelCase ) + 1 ):
snake_case_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->int:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 1
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Any ) ->List[str]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) ->Optional[int]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_0_2, 1_0_3]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for input_row in range(len(_UpperCamelCase ) ):
snake_case_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_text
def snake_case__( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_UpperCamelCase )
class snake_case_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ) ->List[Any]:
super().__init__()
snake_case_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_UpperCamelCase , '''spiece.model''' ) , '''rb''' ).read() )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.tokenizer.tokenize(_UpperCamelCase )
snake_case_, snake_case_ = text.pad_model_inputs(
_UpperCamelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
return self.tokenizer.detokenize(_UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
snake_case_ = complete_model(_UpperCamelCase )
snake_case_ = tf.keras.Model(_UpperCamelCase , _UpperCamelCase )
keras_model.save(_UpperCamelCase )
def snake_case__( self : Any ) ->List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
snake_case_ = 1_4
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = '''Hello, my dog is cute and'''
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''tf''' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case__( self : str ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = '''Hugging Face is a technology company based in New York and Paris.'''
snake_case_ = bart_tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Optional[int] ) ->List[str]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_UpperCamelCase , _UpperCamelCase ) )
class snake_case_ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : Tuple ) ->Optional[Any]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
with self.assertRaises(_UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_UpperCamelCase , foo='''bar''' ) | 39 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
from transformers.testing_utils import pytest_terminal_summary_main
snake_case_ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(SCREAMING_SNAKE_CASE__ , id=SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
snake_case_ = 0
# Doctest custom flag to ignore output.
lowerCAmelCase_ = doctest.register_optionflag('''IGNORE_RESULT''')
lowerCAmelCase_ = doctest.OutputChecker
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : int , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Any ) ->List[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = CustomOutputChecker
lowerCAmelCase_ = HfDoctestModule
lowerCAmelCase_ = HfDocTestParser | 39 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = DonutProcessor.from_pretrained(_UpperCamelCase )
def snake_case__( self : Dict ) ->str:
snake_case_ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
snake_case_ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
snake_case_ = self.processor.tokenajson(_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    SCREAMING_SNAKE_CASE : bool = field(default=__A , metadata={"help": "Whether to use SortishSampler or not."} )
SCREAMING_SNAKE_CASE : bool = field(
default=__A , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    SCREAMING_SNAKE_CASE : bool = field(default=__A , metadata={"help": "Whether to use Adafactor."} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=__A , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=__A , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "Dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=__A , metadata={"help": "Attention dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , ) | 39 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not nums:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
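# e.g. a call with [1, 2, 3] returns 6 / 3 == 2.0 (a plain float mean)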
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 1 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Dict , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 39 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : List[str] ) ->str:
snake_case_ = inspect.getfile(accelerate.test_utils )
snake_case_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
snake_case_ = test_metrics
@require_cpu
def snake_case__( self : str ) ->int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case__( self : Union[str, Any] ) ->Any:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case__( self : List[Any] ) ->Tuple:
self.test_metrics.main()
@require_multi_gpu
def snake_case__( self : Any ) ->Union[str, Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) | 39 | 1 |
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    # remove the pegasus newline char; re.sub returns a new string, so keep the result
    snake_case_ = re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE__ )
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case_ ) )
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "informer"
SCREAMING_SNAKE_CASE : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Dict , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "student_t" , _UpperCamelCase : str = "nll" , _UpperCamelCase : int = 1 , _UpperCamelCase : List[int] = None , _UpperCamelCase : Optional[Union[str, bool]] = "mean" , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : int = 6_4 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : bool = True , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.05 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 1_0_0 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : Dict=True , _UpperCamelCase : str = "prob" , _UpperCamelCase : int = 5 , _UpperCamelCase : bool = True , **_UpperCamelCase : Optional[Any] , ) ->Optional[int]:
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Informer
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def snake_case__( self : Optional[Any] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 39 | 1 |
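# Usage sketch (added example; assumes parity with transformers.InformerConfig):
#     >>> config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
#     >>> config.d_model, config.attention_type
#     (64, 'prob')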
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
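# Sanity check (added example, not in the original script): a straight line
# y = x from 0 to 1 has exact length sqrt(2), and the piecewise-linear
# approximation of a straight line is exact up to floating-point error.
assert abs(line_length(lambda x: x, 0.0, 1.0, 100) - math.sqrt(2)) < 1e-9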
if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print('''f(x) = sin(10 * x)''')
    print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
    while i <= 10_00_00:
        print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
        i *= 10 | 39 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)
    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)
    # Calculate apparent power
    return voltage_rect * current_rect
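# Minimal check (added example): 100 V at 30 degrees times 2 A at -15 degrees
# should give an apparent power of 200 VA at 15 degrees.
assert abs(apparent_power(100, 2, 30, -15) - cmath.rect(200, math.radians(15))) < 1e-9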
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : int = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""allegro/herbert-base-cased""": 5_14}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = HerbertTokenizer
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase="</s>" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , **__lowerCAmelCase , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :str = [self.cls_token_id]
__magic_name__ :Tuple = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Dict = [self.sep_token_id]
__magic_name__ :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Optional[int] = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
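# Usage sketch (added example; assumes this class mirrors
# transformers.HerbertTokenizerFast on the Hugging Face hub):
#     >>> from transformers import HerbertTokenizerFast
#     >>> tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     >>> ids = tok.build_inputs_with_special_tokens([5, 6, 7])
#     >>> ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id
#     True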
| 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=1_3 , _UpperCamelCase : str=7 , _UpperCamelCase : int=True , _UpperCamelCase : Dict=True , _UpperCamelCase : int=False , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=9_9 , _UpperCamelCase : str=3_2 , _UpperCamelCase : str=5 , _UpperCamelCase : str=4 , _UpperCamelCase : int=3_7 , _UpperCamelCase : int="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : str=5_1_2 , _UpperCamelCase : Optional[int]=1_6 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : Any=0.02 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : str=None , ) ->Dict:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def snake_case__( self : str ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] ) ->Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
    def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , gradient_checkpointing : bool=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] ) ->int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = BioGptModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = '''left'''
# Define PAD Token = EOS Token = 50256
snake_case_ = tokenizer.eos_token
snake_case_ = model.config.eos_token_id
# use different length sentences to test batching
snake_case_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
snake_case_ = inputs['''input_ids'''].to(_UpperCamelCase )
snake_case_ = model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
snake_case_ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase )
snake_case_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
snake_case_ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
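# Quick-start sketch (added example; mirrors the slow tests above and uses the
# public microsoft/biogpt checkpoint, so it needs network access to run):
#     >>> from transformers import BioGptForCausalLM, BioGptTokenizer
#     >>> tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#     >>> model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#     >>> inputs = tokenizer("COVID-19 is", return_tensors="pt")
#     >>> output_ids = model.generate(**inputs, max_length=20)
#     >>> print(tokenizer.decode(output_ids[0], skip_special_tokens=True))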
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __lowerCamelCase (_a ):
_lowercase = """roformer"""
    def __init__( self, vocab_size=5_0000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1E-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: str ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
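# Usage sketch (added example; assumes parity with transformers.RoFormerConfig):
#     >>> config = RoFormerConfig(vocab_size=5_0000, rotary_value=False)
#     >>> config.embedding_size == config.hidden_size  # falls back when unset
#     True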
| 1 |
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) ~= h * (f(x_0)/2 + f(x_1) + ... + f(x_n-1) + f(x_n)/2), with h = dx
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
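# Worked example (added, not part of the original script): for f(x) = x**2 on
# [0, 1] the exact integral is 1/3 ~= 0.3333, and the 10-step extended
# trapezoidal estimate above comes out to roughly 0.335:
#     >>> round(method_a([0.0, 1.0], 10.0), 3)
#     0.335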
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(F'''y = {y}''' )


if __name__ == "__main__":
    main() | 39 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'''The estimated value of pi is {pi_estimate}''' )
    print(F'''The value of math.pi is {pi}''' )
    print(F'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
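# Usage sketch (added example): a Monte Carlo estimate of the integral of
# x**2 over [0, 1]; with 100_000 samples the result lands near the exact 1/3.
#     >>> estimate = area_under_curve_estimator(100_000, lambda x: x**2)
#     >>> abs(estimate - 1 / 3) < 0.01  # loose tolerance, the estimate is random
#     True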
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('''******************''' )
    print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
    print(F'''Estimated value is {estimated_value}''' )
    print(F'''Expected value is {expected_value}''' )
    print(F'''Total error is {abs(estimated_value - expected_value )}''' )
    print('''******************''' )
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print('''******************''' )
    print('''Estimating pi using area_under_curve_estimator''' )
    print(F'''Estimated value is {estimated_value}''' )
    print(F'''Expected value is {pi}''' )
    print(F'''Total error is {abs(estimated_value - pi )}''' )
    print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / '''model_card_template.md'''
lowerCAmelCase_ = uuida().hex
lowerCAmelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None ):
snake_case_ = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + user_agent
return ua
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if token is None:
snake_case_ = HfFolder.get_token()
if organization is None:
snake_case_ = whoami(SCREAMING_SNAKE_CASE__ )['''name''']
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
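# Usage sketch (added example; `get_full_repo_name` is the presumed
# de-obfuscated name of the helper above):
#     >>> get_full_repo_name("my-model", organization="my-org")
#     'my-org/my-model'
# Without an organization, the username tied to the token is used instead.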
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(SCREAMING_SNAKE_CASE__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case_ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , '''hub_token''' ) else None
snake_case_ = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
snake_case_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
snake_case_ = os.path.join(args.output_dir , '''README.md''' )
model_card.save(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case_ = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() )
snake_case_ = re.search(R'''snapshots/([^/]+)/''' , SCREAMING_SNAKE_CASE__ )
if search is None:
return None
snake_case_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None
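# Usage sketch (added example; `extract_commit_hash` is the presumed
# de-obfuscated name): the commit hash is read from a hub cache path, e.g.
# ".../snapshots/<40-char-sha>/unet/config.json" yields "<40-char-sha>",
# while a path without a "snapshots/" segment yields None.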
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, '''diffusers''')
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if new_cache_dir is None:
snake_case_ = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case_ = old_diffusers_cache
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case_ = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ )
new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
try:
os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
lowerCAmelCase_ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_ = int(f.read())
except ValueError:
lowerCAmelCase_ = 0
if cache_version < 1:
lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if variant is not None:
snake_case_ = weights_name.split('''.''' )
snake_case_ = splits[:-1] + [variant] + splits[-1:]
snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ )
return weights_name
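# Usage sketch (added example; `_add_variant` is the presumed de-obfuscated
# name): the variant tag is spliced in before the file extension, e.g.
#     >>> _add_variant("diffusion_pytorch_model.safetensors", "fp16")
#     'diffusion_pytorch_model.fp16.safetensors'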
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , *,
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ):
snake_case_ = str(SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
# Load from a PyTorch checkpoint
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' ) | 39 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Any = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name: str):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01])
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01])
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02])
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1E-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''')
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
lowerCAmelCase : List[Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
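# Invocation sketch (added example; the script filename is hypothetical):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt <path-or-url-to-original-checkpoint>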
| 3 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output | 39 | 0 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class a ( a__ , a__ ):
snake_case__ = '''pixel_values'''
snake_case__ = False
snake_case__ = TimmBackboneConfig
def __init__( self , _snake_case , **_snake_case ):
"""simple docstring"""
requires_backends(self , 'timm' )
super().__init__(_snake_case )
lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(_snake_case , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
lowerCAmelCase = getattr(_snake_case , 'use_pretrained_backbone' , _snake_case )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase = config.out_indices if getattr(_snake_case , 'out_indices' , _snake_case ) is not None else (-1,)
lowerCAmelCase = timm.create_model(
config.backbone , pretrained=_snake_case , features_only=config.features_only , in_chans=config.num_channels , out_indices=_snake_case , **_snake_case , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase = self._backbone.return_layers
lowerCAmelCase = {layer['module']: str(_snake_case ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_snake_case )
@classmethod
def UpperCamelCase__ ( cls , _snake_case , *_snake_case , **_snake_case ):
"""simple docstring"""
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase = kwargs.pop('config' , TimmBackboneConfig() )
lowerCAmelCase = kwargs.pop('use_timm_backbone' , _snake_case )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
lowerCAmelCase = TimmBackboneConfig(
backbone=_snake_case , num_channels=_snake_case , features_only=_snake_case , use_pretrained_backbone=_snake_case , out_indices=_snake_case , )
return super()._from_config(_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase = self._all_layers
lowerCAmelCase = self._backbone(_snake_case , **_snake_case )
lowerCAmelCase = self._return_layers
lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase = self._backbone(_snake_case , **_snake_case )
lowerCAmelCase = None
lowerCAmelCase = tuple(_snake_case )
lowerCAmelCase = tuple(_snake_case ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase = (feature_maps,)
if output_hidden_states:
lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_snake_case , hidden_states=_snake_case , attentions=_snake_case )
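# Usage sketch (added example; hypothetical, mirroring transformers.TimmBackbone
# and the `_from_config` path defined above):
#     >>> config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#     >>> model = TimmBackbone._from_config(config)
#     >>> outputs = model(pixel_values)  # pixel_values: a float image tensor
#     >>> [fm.shape for fm in outputs.feature_maps]  # one map per requested stage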
| 4 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = '''pytorch_model.bin'''
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "A csv or a json file containing the validation data."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "The name of the task to train on."} , )
SCREAMING_SNAKE_CASE : Optional[List[str]] = dataclasses.field(
default=__A , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=__A , metadata={"help": "Random seed for initialization."} , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
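    # Join the inference inputs with the model's predictions, optionally filter the
    # pairs by confidence and/or validation performance, and write the resulting
    # pseudo-labeled split to train_pseudo.<ext> for the next self-training round.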
snake_case_ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
        snake_case_ = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
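        # Keep only the top `eval_result` fraction of examples ranked by confidence,
        # so a stronger validation score lets more pseudo-labels through.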
snake_case_ = int(eval_result * len(SCREAMING_SNAKE_CASE__ ) )
print(SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.sort('''probability''' , reverse=SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
snake_case_ = dataset.remove_columns(['''label''', '''probability'''] )
snake_case_ = dataset.rename_column('''prediction''' , '''label''' )
    snake_case_ = dataset.map(lambda example : {"label": idalabel[example["label"]]} )
snake_case_ = dataset.shuffle(seed=args.seed )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ )
else:
dataset.to_json(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
snake_case_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case_ = STModelArguments(model_name_or_path=SCREAMING_SNAKE_CASE__ )
snake_case_ = STDataArguments(train_file=SCREAMING_SNAKE_CASE__ , infer_file=SCREAMING_SNAKE_CASE__ )
snake_case_ = STTrainingArguments(output_dir=SCREAMING_SNAKE_CASE__ )
snake_case_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(SCREAMING_SNAKE_CASE__ ).items():
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for key, value in kwargs.items():
if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Sanity checks
snake_case_ = {}
snake_case_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case_ = args.train_file
snake_case_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case_ = args.eval_file
for key in data_files:
snake_case_ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
snake_case_ = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
snake_case_ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
snake_case_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = None
snake_case_ = None
snake_case_ = 0
snake_case_ = False
# Show the progress bar
snake_case_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case_ = data_dir_format(SCREAMING_SNAKE_CASE__ )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-1''' )
snake_case_ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
arguments_dict.update({key: value} )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-2''' )
# Update arguments_dict
snake_case_ = model_path
snake_case_ = data_files['''train''']
snake_case_ = current_output_dir
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = iteration
snake_case_ = data_dir_format(iteration + 1 )
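        # Evaluate this iteration's best checkpoint, then use it to pseudo-label the
        # inference set so the next iteration has fresh training data.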
snake_case_ = AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' ) )
        snake_case_ = config.id2label
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-checkpoint.json''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = float(json.load(SCREAMING_SNAKE_CASE__ )[args.eval_metric] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Loading the dataset from local csv or json files.
snake_case_ = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
snake_case_ = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case_ = eval_result
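            # Early-stopping bookkeeping: remember the best iteration seen so far and
            # bump the patience counter when the improvement falls below the threshold.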
if best_iteration is None:
snake_case_ = new_iteration
snake_case_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case_ = new_iteration
snake_case_ = new_eval_result
snake_case_ = 0
else:
if new_eval_result == best_eval_result:
snake_case_ = new_iteration
snake_case_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , SCREAMING_SNAKE_CASE__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
            os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[Any] = '''xmod'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , _lowercase=None , _lowercase=False , _lowercase=2 , _lowercase=False , _lowercase=True , _lowercase=True , _lowercase=("en_XX",) , _lowercase=None , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = use_cache
_lowerCAmelCase = classifier_dropout
_lowerCAmelCase = pre_norm
_lowerCAmelCase = adapter_reduction_factor
_lowerCAmelCase = adapter_layer_norm
_lowerCAmelCase = adapter_reuse_layer_norm
_lowerCAmelCase = ln_before_adapter
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = default_language
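# Usage sketch (hypothetical; the keyword names follow the upstream transformers
# XmodConfig, since the positional parameter names above are mangled):
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")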
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
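# Note: the mapping returned above marks the batch/choice/sequence dimensions as
# dynamic axes for ONNX export, following the OnnxConfig.inputs convention.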
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AltDiffusionPipeline
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__( self : Dict ) ->int:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
snake_case_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
snake_case_ = CLIPTextModel(_UpperCamelCase )
snake_case_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
snake_case_ = 7_7
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=0 ) ->Any:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Dict ) ->List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : List[str] ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__( self : Dict ) ->Any:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''A photo of an astronaut'''
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : int ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[str] ) ->Tuple:
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''numpy''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # no fast tokenizer is available for RoCBert
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from math import factorial
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(SCREAMING_SNAKE_CASE__ ) // (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k ))
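# Worked example: combinations(5, 2) == 10, since 5! / (2! * 3!) = 120 / (2 * 6).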
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Dict = '''pegasus'''
UpperCAmelCase : Any = ['''past_key_values''']
UpperCAmelCase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] , _UpperCAmelCase : List[Any]=50_265 , _UpperCAmelCase : Optional[int]=1_024 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[Any]=4_096 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : List[str]=4_096 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Any=1_024 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : Tuple=1 , **_UpperCAmelCase : str , ):
_A = vocab_size
_A = max_position_embeddings
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return self.encoder_attention_heads
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return self.d_model
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
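# Dataclasses reject mutable defaults such as lists, so list-valued defaults are
# supplied through `default_factory` instead of `default`.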
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : bool
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 42
SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : Optional[bool] = None
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "titi"
SCREAMING_SNAKE_CASE : Any = "toto"
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = "titi"
SCREAMING_SNAKE_CASE : Optional[Any] = "toto"
SCREAMING_SNAKE_CASE : Any = 42
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def snake_case__( self : Tuple ) ->List[str]:
snake_case_ = BasicEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = field()
SCREAMING_SNAKE_CASE : str = field()
SCREAMING_SNAKE_CASE : BasicEnum = field()
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : "BasicEnum" = field()
SCREAMING_SNAKE_CASE : "Optional[bool]" = None
SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int | None = None
SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : str | None = None
SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def snake_case__( self : str ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
            snake_case_ = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
        self.assertIsNotNone(_UpperCamelCase )
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
        (__A, __A, __A, __A, __A, __A, __A) = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2 , 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(config.output_hidden_states , False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
            self.assertEqual(model.config.output_hidden_states , True)
            check_encoder_attentions_output(outputs)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
    def test_inference_masked_lm( self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
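# ---------------------------------------------------------------------------
# Standalone sketch of the same forward pass (an illustrative example, not
# part of the test suite; assumes tensorflow, transformers and network access
# to the 'YituTech/conv-bert-base' checkpoint):
#
#   import tensorflow as tf
#   from transformers import TFConvBertModel
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 768)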
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
    '''simple docstring'''

    def __init__( self : Dict , *args : int , **kwargs : Tuple ) ->None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
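# Migration sketch for downstream users (the image processor exposes the same
# preprocessing API; the checkpoint name below is illustrative):
#
#   from transformers import ChineseCLIPImageProcessor
#   image_processor = ChineseCLIPImageProcessor.from_pretrained(
#       "OFA-Sys/chinese-clip-vit-base-patch16")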
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def squared_euclidean_distance(a , b ):
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x , clusters ):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
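# Quick sanity check for the two helpers above (a sketch with a hypothetical
# two-colour palette; only numpy is needed). Each pixel is assigned the index
# of its nearest palette colour:
#
#   palette = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])  # black, white
#   pixels = np.array([[[0.1, 0.0, 0.1], [0.9, 1.0, 0.8]]])
#   color_quantize(pixels, palette)  # -> array([0, 1])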
class ImageGPTImageProcessor( BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__( self : List[str] , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_normalize : bool = True , do_color_quantize : bool = True , **kwargs : Dict , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 2_56, 'width': 2_56}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self : int , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : str , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self : Any , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , ):
        """simple docstring"""
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self : List[Any] , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_normalize : bool = None , do_color_quantize : Optional[bool] = None , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs : int , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
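# Usage sketch for the processor above (a minimal example; with
# do_color_quantize=True each image is flattened to palette indices):
#
#   processor = ImageGPTImageProcessor(clusters=palette)  # palette: (n, 3)
#   encoding = processor(images, return_tensors="np")
#   encoding["input_ids"].shape  # -> (batch_size, height * width)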
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__( self , vocab_size=5_0_2_7_7 , context_length=1_0_2_4 , hidden_size=4_0_9_6 , num_hidden_layers=3_2 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) ->None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
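# Usage sketch (this is the RWKV configuration class, exported upstream as
# `RwkvConfig`; the checkpoint names come from the archive map above):
#
#   from transformers import RwkvConfig, RwkvModel
#   config = RwkvConfig(context_length=2048)
#   model = RwkvModel(config)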
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"""y = {y}""")
if __name__ == "__main__":
main()
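    # Worked example: for f(x) = x^2 on [0, 1] with 10 steps, main() prints
    # y ~= 0.335, slightly above the exact integral 1/3 because the
    # trapezoidal rule overestimates convex integrands (error is O(h^2)).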
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
class snake_case_ :
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : Optional[str] = None ) ->Tuple:
snake_case_ = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case_ = Extractor
def snake_case__( self : Any , _UpperCamelCase : str ) ->str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case_ = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : bool ) ->bool:
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : bool = False ) ->str:
snake_case_ = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
snake_case_ = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : str ) ->bool:
...
@staticmethod
@abstractmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
...
class snake_case_ ( __A , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[bytes] = []
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->List[Any]:
with open(_UpperCamelCase , '''rb''' ) as f:
return f.read(_UpperCamelCase )
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if not magic_number:
snake_case_ = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
snake_case_ = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : Any ) ->bool:
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def snake_case__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) ->List[str]:
def resolved(_UpperCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_UpperCamelCase ) )
def badpath(_UpperCamelCase : str , _UpperCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase )
def badlink(_UpperCamelCase : Tuple , _UpperCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case_ = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCamelCase )
snake_case_ = resolved(_UpperCamelCase )
for finfo in members:
if badpath(finfo.name , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [b"\x1F\x8B"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with gzip.open(_UpperCamelCase , '''rb''' ) as gzip_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def snake_case__( cls : List[str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , '''rb''' ) as fp:
snake_case_ = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case_ = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
snake_case_ = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
snake_case_ = zstd.ZstdDecompressor()
with open(_UpperCamelCase , '''rb''' ) as ifh, open(_UpperCamelCase , '''wb''' ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"\x42\x5A\x68"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
        with bz2.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
        import py7zr
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
        with py7zr.SevenZipFile(_UpperCamelCase , '''r''' ) as archive:
archive.extractall(_UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x04\x22\x4D\x18"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case__( cls : List[Any] ) ->List[str]:
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def snake_case__( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bool = False ) ->bool:
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case__( cls : int , _UpperCamelCase : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
snake_case_ = cls._get_magic_number_max_length()
snake_case_ = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ) ->None:
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
snake_case_ = str(Path(_UpperCamelCase ).with_suffix('''.lock''' ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = extractor if extractor != '''deprecated''' else extractor_format
else:
snake_case_ = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
                return extractor.extract(_UpperCamelCase , _UpperCamelCase )
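# Usage sketch (class names here are still mangled; upstream this module backs
# the `datasets` ExtractManager, which infers the archive format from the
# file's magic number and caches the extracted result):
#
#   extract_manager = ExtractManager(cache_dir="/tmp/extracted")
#   output_path = extract_manager.extract("data/archive.tar.gz")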
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple , flax_tensor):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer , checkpoint_info , switch_checkpoint_path):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split('''metadata''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        split_layer = layer.split('''/''')
        curr_real_layer_name = '''/'''.join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block , save_path):
    """simple docstring"""
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''' , '''.''')] = v
    current_block = new_current_block
    torch.save(current_block , save_path)
def shard_on_the_fly(switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name = WEIGHTS_NAME):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info , sep='''/''')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('''/''')) , raw_weights)
        key = '''/'''.join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
            rename_and_save_block(current_block , save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
    rename_and_save_block(current_block , save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '''.bin''' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin''')  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin'''))
        os.rename(temp_filename , os.path.join(dump_path , shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME) , '''w''' , encoding='''utf-8''') as f:
        content = json.dumps(index , indent=2 , sort_keys=True) + '''\n'''
        f.write(content)
    return metadata, index
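# The index built above follows the standard sharded-checkpoint layout, e.g.
# (a sketch with made-up key and file names):
#
#   {"metadata": {"total_size": 123456789},
#    "weight_map": {"shared.weight": "pytorch_model-00001-of-00072.bin"}}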
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
lowercase_ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''')
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text , return_tensors='''pt''').input_ids
    out = model.generate(input_ids , decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
def bead_sort(sequence):
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
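    # Note: each outer pass lets every "bead" fall by at most one rod, so at
    # most len(sequence) passes are needed and this implementation runs in
    # O(n^2) time for n non-negative integers.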
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_altclip"""] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
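# Usage sketch of the lazy module above: submodules are imported only on
# first attribute access, e.g. (the checkpoint name is illustrative):
#
#   from transformers import AltCLIPModel, AltCLIPProcessor
#   model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")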
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    SCREAMING_SNAKE_CASE__ = re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE__ ) )
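# Example (a sketch; assumes nltk's punkt data was downloaded above):
#
#   __SCREAMING_SNAKE_CASE("Hello world. Two sentences here.")
#   # -> "Hello world.\nTwo sentences here."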
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 1_00) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 2_00) + one_pound(x)


def solution(x: int = 2_00) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
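# For the classic Project Euler #31 statement (ways to make GBP 2.00 from the
# eight UK coin denominations above), solution(200) should evaluate to 73682.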
def binomial_coefficient(n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
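# Sanity check: C(10, 5) = 10! / (5! * 5!) = 252, so the call above prints 252.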
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    _a : int = [json.loads(line ) for line in open(__a )]
_a : List[Any] = {}
for entry in data:
_a : int = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_a : List[Any] = entity_id
break
_a : Dict = F"""{language}:{entity_name}"""
_a : int = entity_id
return new_mapping
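# The original entity vocab is a JSON-lines file; each entry looks like this
# sketch: {"id": 3, "entities": [["Japan", "en"], ["Japon", "fr"]]}, and the
# mapping above stores multilingual names as "<language>:<entity_name>".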
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2


def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
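# Endpoint check for the schedule above (assuming the usual k-diffusion
# convention): at t=0, sigma=0 and alpha=1, so atan2(0, 1) / pi * 2 = 0;
# at t=1, sigma=1 and alpha=0, so atan2(1, 0) / pi * 2 = 1.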
class Object( object ):
    '''simple docstring'''

    pass
class DiffusionUncond( nn.Module ):
    '''simple docstring'''

    def __init__( self : List[Any] , global_args : int ) ->Optional[int]:
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download(model_name):
    url = MODELS_MAP[model_name]['''url''']
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def rename(input_string , max_depth=13 ):
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        new_string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
    string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
    snake_case_ = UNet1DModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    main(args)
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort.

    >>> merge_sort([5, 1, 4, 2, 3])
    [1, 2, 3, 4, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right

        return list(_merge() )

    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
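# Usage sketch mirroring the lazy-import pattern above (the checkpoint name
# is the reference ViT-MSN release and may differ for your setup):
#
#   from transformers import ViTMSNForImageClassification
#   model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")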
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    # NOTE: helper names in this block were reconstructed from the mangled
    # source and are assumptions; only the bodies come from the original.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device


def show_image(image):
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
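# Usage sketch for the helpers above (names were reconstructed, see the note
# on freeze_params):
#
#   device = get_device()
#   freeze_params(model.text_encoder)  # stop gradients through a submodule
#   print(get_timestamp())             # e.g. "14:03:59"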
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def test_top_k_top_p_filtering( self : Optional[Any] ) ->Any:
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non-filtered indices as noted above
snake_case_ = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(_UpperCamelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('''inf''' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(_UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
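        # What is exercised above: top_k=10 first keeps the ten largest logits per row,
        # top_p=0.6 then keeps the smallest prefix of those whose softmax mass reaches
        # 0.6 (min_tokens_to_keep=4 acts as a floor), and every other logit becomes
        # -inf, which is why the -inf entries are stripped before comparing.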
@require_tf
class snake_case_ ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
SCREAMING_SNAKE_CASE : Optional[int] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def snake_case__( self : List[Any] ) ->Optional[int]:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 2
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_0_2, 1_0_3]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(_UpperCamelCase ) + 1 ):
snake_case_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
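        # Pattern under test: wrapping generate() in a tf.function with a fixed input
        # signature lets the model be serialised via tf.saved_model and then served
        # without any Python-side generation code.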
@slow
def snake_case__( self : List[str] ) ->int:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 1
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Any ) ->List[str]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) ->Optional[int]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_0_2, 1_0_3]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for input_row in range(len(_UpperCamelCase ) ):
snake_case_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_text
def snake_case__( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_UpperCamelCase )
class snake_case_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ) ->List[Any]:
super().__init__()
snake_case_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_UpperCamelCase , '''spiece.model''' ) , '''rb''' ).read() )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.tokenizer.tokenize(_UpperCamelCase )
snake_case_, snake_case_ = text.pad_model_inputs(
_UpperCamelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
return self.tokenizer.detokenize(_UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
snake_case_ = complete_model(_UpperCamelCase )
snake_case_ = tf.keras.Model(_UpperCamelCase , _UpperCamelCase )
keras_model.save(_UpperCamelCase )
def snake_case__( self : Any ) ->List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
snake_case_ = 1_4
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = '''Hello, my dog is cute and'''
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''tf''' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
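        # Generation stops as soon as any of the supplied eos token ids is sampled;
        # with tf.random.set_seed(0) on CPU the stop point, and therefore the asserted
        # length, is deterministic.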
def snake_case__( self : str ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = '''Hugging Face is a technology company based in New York and Paris.'''
snake_case_ = bart_tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Optional[int] ) ->List[str]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_UpperCamelCase , _UpperCamelCase ) )
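        # Because the subclass's call() accepts **kwargs, generate() forwards the
        # unknown foo="bar" argument harmlessly and the output matches the baseline.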
class snake_case_ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : Tuple ) ->Optional[Any]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
with self.assertRaises(_UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_UpperCamelCase , foo='''bar''' ) | 39 | 0 |
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, Any] ) -> int:
stooge(a__ ,0 ,len(a__ ) - 1 )
return arr
def __SCREAMING_SNAKE_CASE ( a__ : Optional[int] ,a__ : Tuple ,a__ : str ) -> Any:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
__A , __A : Optional[int] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
__A : List[Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(a__ ,a__ ,(h - t) )
# Recursively sort last 2/3 elements
stooge(a__ ,i + t ,(a__) )
# Recursively sort first 2/3 elements
stooge(a__ ,a__ ,(h - t) )
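    # Stooge sort recurses on two overlapping two-thirds slices, giving roughly
    # O(n^(log 3 / log 1.5)) ~ O(n^2.71) time; it is a teaching curiosity, not a
    # practical sort.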
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase_ : Any = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
| 17 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = DonutProcessor.from_pretrained(_UpperCamelCase )
def snake_case__( self : Dict ) ->str:
snake_case_ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
snake_case_ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
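        # token2json (rendered tokenajson after obfuscation) walks the XML-like
        # <s_key>...</s_key> tags, using <sep/> to split repeated fields such as the
        # nicknames list, and rebuilds the nested dict asserted below.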
snake_case_ = self.processor.tokenajson(_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : List[str] = "AutoTokenizer"
__lowerCamelCase : Tuple = ["tokenizer"]
__lowerCamelCase : List[str] = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
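    # preset_shape records the expected ndim of each Bark voice-preset array: the
    # semantic prompt is a 1-D token sequence, while the coarse and fine prompts
    # are 2-D codebook arrays.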
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> int:
super().__init__(_lowerCAmelCase )
_lowerCAmelCase = speaker_embeddings
@classmethod
def _snake_case ( cls , _lowerCAmelCase , _lowerCAmelCase="speaker_embeddings_path.json" , **_lowerCAmelCase ) -> List[Any]:
if speaker_embeddings_dict_path is not None:
_lowerCAmelCase = get_file_from_repo(
_lowerCAmelCase , _lowerCAmelCase , subfolder=kwargs.pop("subfolder" , _lowerCAmelCase ) , cache_dir=kwargs.pop("cache_dir" , _lowerCAmelCase ) , force_download=kwargs.pop("force_download" , _lowerCAmelCase ) , proxies=kwargs.pop("proxies" , _lowerCAmelCase ) , resume_download=kwargs.pop("resume_download" , _lowerCAmelCase ) , local_files_only=kwargs.pop("local_files_only" , _lowerCAmelCase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowerCAmelCase ) , revision=kwargs.pop("revision" , _lowerCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    f'''`{os.path.join(_lowerCAmelCase , _lowerCAmelCase )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
_lowerCAmelCase = None
else:
with open(_lowerCAmelCase ) as speaker_embeddings_json:
_lowerCAmelCase = json.load(_lowerCAmelCase )
else:
_lowerCAmelCase = None
_lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
return cls(tokenizer=_lowerCAmelCase , speaker_embeddings=_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase="speaker_embeddings_path.json" , _lowerCAmelCase="speaker_embeddings" , _lowerCAmelCase = False , **_lowerCAmelCase , ) -> List[str]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowerCAmelCase , _lowerCAmelCase , "v2" ) , exist_ok=_lowerCAmelCase )
_lowerCAmelCase = {}
_lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCAmelCase = self._load_voice_preset(_lowerCAmelCase )
_lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowerCAmelCase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowerCAmelCase , )
_lowerCAmelCase = os.path.join(_lowerCAmelCase , f'''{prompt_key}_{key}.npy''' )
_lowerCAmelCase = tmp_dict
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
super().save_pretrained(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase = None , **_lowerCAmelCase ) -> Dict:
_lowerCAmelCase = self.speaker_embeddings[voice_preset]
_lowerCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
_lowerCAmelCase = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowerCAmelCase ) , cache_dir=kwargs.pop("cache_dir" , _lowerCAmelCase ) , force_download=kwargs.pop("force_download" , _lowerCAmelCase ) , proxies=kwargs.pop("proxies" , _lowerCAmelCase ) , resume_download=kwargs.pop("resume_download" , _lowerCAmelCase ) , local_files_only=kwargs.pop("local_files_only" , _lowerCAmelCase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowerCAmelCase ) , revision=kwargs.pop("revision" , _lowerCAmelCase ) , )
if path is None:
raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
_lowerCAmelCase = np.load(_lowerCAmelCase )
return voice_preset_dict
def _snake_case ( self , _lowerCAmelCase = None ) -> Any:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="pt" , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , **_lowerCAmelCase , ) -> List[str]:
if voice_preset is not None and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_lowerCAmelCase = self._load_voice_preset(_lowerCAmelCase )
else:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not voice_preset.endswith(".npz" ):
_lowerCAmelCase = voice_preset + ".npz"
_lowerCAmelCase = np.load(_lowerCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
_lowerCAmelCase = self.tokenizer(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , padding="max_length" , max_length=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
if voice_preset is not None:
_lowerCAmelCase = voice_preset
return encoded_text
| 18 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not nums:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
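# Hypothetical usage, assuming the helper above is named mean upstream:
#     mean([3, 6, 9])  # -> 6.0
#     mean([])         # raises ValueError("List is empty")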
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _UpperCAmelCase( lowerCamelCase ):
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = pa.array(TypedSequence([1, 2, 3]))
self.assertEqual(arr.type , pa.intaa())
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
with self.assertRaises(__a):
_UpperCamelCase = pa.array(TypedSequence([1, 2, 3]) , type=pa.intaa())
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
with self.assertRaises(__a):
_UpperCamelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''') , type=Value('''int64''')))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''')))
self.assertEqual(arr.type , pa.intaa())
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
_UpperCamelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''')))
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''')))
self.assertEqual(arr.type , pa.intaa())
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''')))
self.assertEqual(arr.type , pa.string())
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''')))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64'''))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
_UpperCamelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''')))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''')))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64'''))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''')))
self.assertEqual(arr.type , pa.string())
@require_pil
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
import PIL.Image
_UpperCamelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta).reshape(2 , 5))
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=__a) as mock_cast_to_python_objects:
_UpperCamelCase = pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image] , type=Image()))
_UpperCamelCase , _UpperCamelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , __a)
self.assertFalse(kwargs['''optimize_list_casting'''])
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = pa.BufferReader(__snake_case ) if isinstance(__snake_case, pa.Buffer ) else pa.memory_map(__snake_case )
_UpperCamelCase = pa.ipc.open_stream(__snake_case )
_UpperCamelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
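# _check_output re-reads the serialised Arrow stream and asserts both the chunk
# count and the payload, so the writer tests below all funnel through this check.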
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
_UpperCamelCase = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case, schema=__snake_case, writer_batch_size=__snake_case ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCamelCase = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__snake_case, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
_UpperCamelCase = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=__snake_case, features=__snake_case ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCamelCase = pa.BufferReader(output.getvalue() )
_UpperCamelCase = pa.ipc.open_stream(__snake_case )
_UpperCamelCase = f.read_all()
_UpperCamelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__snake_case )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=__snake_case, writer_batch_size=__snake_case, hash_salt='''split_name''', check_duplicates=__snake_case, ) as writer:
with pytest.raises(__snake_case ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=[1, 2] )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
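# In the upstream datasets tests, the pytest.raises above targets InvalidKeyError
# (non-int key) and the one in the next test targets DuplicatedKeysError (repeated
# key); obfuscation has collapsed both imported names into the same placeholder.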
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 10] )
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=__snake_case, writer_batch_size=__snake_case, hash_salt='''split_name''', check_duplicates=__snake_case, ) as writer:
with pytest.raises(__snake_case ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=10 )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 10] )
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=__snake_case, writer_batch_size=__snake_case, hash_salt='''split_name''', check_duplicates=__snake_case, ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=2 )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
_UpperCamelCase = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case, schema=__snake_case, writer_batch_size=__snake_case ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCamelCase = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__snake_case, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
_UpperCamelCase = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case, schema=__snake_case, writer_batch_size=__snake_case ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCamelCase = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__snake_case, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
_UpperCamelCase = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case, schema=__snake_case, writer_batch_size=__snake_case ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCamelCase = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__snake_case, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
_UpperCamelCase = os.path.join(__snake_case, '''test.arrow''' )
with ArrowWriter(path=__snake_case, schema=pa.schema(__snake_case ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__snake_case, metadata=writer._schema.metadata )
_check_output(__snake_case, 1 )
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
if pa.types.is_list(__snake_case ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
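# get_base_dtype unwraps nested pa.list_ types, so [1, 2, 3], [[1, 2, 3]] and
# [[[1, 2, 3]]] all compare against the same scalar dtype in the tests below.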
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
if isinstance(lst[0], __snake_case ):
change_first_primitive_element_in_list(lst[0], __snake_case )
else:
_UpperCamelCase = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''', [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = pa.array(TypedSequence(__snake_case, optimized_int_type=__snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''', [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
], )
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pa.array(OptimizedTypedSequence(__snake_case, col=__snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCamelCase = copy.deepcopy(__snake_case )
_UpperCamelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__snake_case, __snake_case )
_UpperCamelCase = pa.array(OptimizedTypedSequence(__snake_case, col=__snake_case ) )
assert get_base_dtype(arr.type ) == pa.intaa()
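    # Once a value exceeds the optimised dtype's range, OptimizedTypedSequence falls
    # back to a 64-bit integer column rather than overflowing.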
@pytest.mark.parametrize('''raise_exception''', [False, True] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any:
"""simple docstring"""
_UpperCamelCase = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=__snake_case ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = '''mock://dataset-train.arrow'''
with ArrowWriter(path=__snake_case, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(__snake_case ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__snake_case )
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = pa.BufferOutputStream()
with ParquetWriter(stream=__snake_case ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_UpperCamelCase , _UpperCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCamelCase = pa.BufferReader(output.getvalue() )
_UpperCamelCase = pq.read_table(__snake_case )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''', [False, True] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
import PIL.Image
_UpperCamelCase = str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(__snake_case, format='''png''' )
_UpperCamelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=__snake_case, features=Features({'''image''': Image()} ), embed_local_files=__snake_case ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
_UpperCamelCase = pa.BufferReader(output.getvalue() )
_UpperCamelCase = pq.read_table(__snake_case )
_UpperCamelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''], __snake_case )
with open(__snake_case, '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
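    # With embed_local_files=True the parquet payload carries the image bytes and
    # only the basename as path; with False the row keeps the original path and
    # stores no bytes.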
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = pa.schema([pa.field('''col_1''', pa.string(), nullable=__snake_case )] )
_UpperCamelCase = pa.BufferOutputStream()
with ArrowWriter(stream=__snake_case ) as writer:
writer._build_writer(inferred_schema=__snake_case )
assert writer._schema == pa.schema([pa.field('''col_1''', pa.string() )] )
| 19 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : List[str] ) ->str:
snake_case_ = inspect.getfile(accelerate.test_utils )
snake_case_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
snake_case_ = test_metrics
@require_cpu
def snake_case__( self : str ) ->int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case__( self : Union[str, Any] ) ->Any:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case__( self : List[Any] ) ->Tuple:
self.test_metrics.main()
@require_multi_gpu
def snake_case__( self : Any ) ->Union[str, Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
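        # The multi-GPU path shells out to torchrun with one process per visible CUDA
        # device, re-running the metrics script under real distributed initialisation.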
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) | 39 | 0 |